From 63a5ad4fd55ea6d9f4e8b99dd1b401b78ce7e35e Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 20 Sep 2024 18:10:21 -0400
Subject: [PATCH] [DOCS][Swarm models]

---
 docs/swarms/models/index.md | 64 ++++++-------------------------------
 1 file changed, 9 insertions(+), 55 deletions(-)

diff --git a/docs/swarms/models/index.md b/docs/swarms/models/index.md
index cbb321de..bcd862da 100644
--- a/docs/swarms/models/index.md
+++ b/docs/swarms/models/index.md
@@ -1,9 +1,6 @@
 # Swarm Models
 
-
-## Install
-
 ```bash
 $ pip3 install -U swarm-models
 ```
 
@@ -16,7 +13,7 @@ Welcome to the documentation for the llm section of the swarms package, designed
 3. [Google PaLM](#google-palm)
 4. [Anthropic](#anthropic)
 
-### 1. OpenAI (swarms.agents.models.OpenAI)
+### 1. OpenAI (swarm_models.OpenAI)
 
 The OpenAI class provides an interface to interact with OpenAI's language models. It allows both synchronous and asynchronous interactions.
 
@@ -40,7 +37,7 @@ OpenAI(api_key: str, system: str = None, console: bool = True, model: str = None
 
 **Methods:**
 
-- `generate(message: str, **kwargs) -> str`: Generate a response using the OpenAI model.
+- `run(message: str, **kwargs) -> str`: Generate a response using the OpenAI model.
 
 - `generate_async(message: str, **kwargs) -> str`: Generate a response asynchronously.
 
@@ -56,7 +53,7 @@ from swarm_models import OpenAI
 
 chat = OpenAI(api_key="YOUR_OPENAI_API_KEY")
 
-response = chat.generate("Hello, how can I assist you?")
+response = chat.run("Hello, how can I assist you?")
 print(response)
 
 ids = ["id1", "id2", "id3"]
@@ -64,7 +61,7 @@ async_responses = asyncio.run(chat.ask_multiple(ids, "How is {id}?"))
 print(async_responses)
 ```
 
-### 2. HuggingFace (swarms.agents.models.HuggingFaceLLM)
+### 2. HuggingFace (swarm_models.HuggingFaceLLM)
 
 The HuggingFaceLLM class allows interaction with language models from Hugging Face.
 
@@ -87,7 +84,7 @@ HuggingFaceLLM(model_id: str, device: str = None, max_length: int = 20, quantize
 
 **Methods:**
 
-- `generate(prompt_text: str, max_length: int = None) -> str`: Generate text based on a prompt.
+- `run(prompt_text: str, max_length: int = None) -> str`: Generate text based on a prompt.
 
 **Usage Example:**
 ```python
@@ -97,54 +94,11 @@ model_id = "gpt2"
 hugging_face_model = HuggingFaceLLM(model_id=model_id)
 
 prompt = "Once upon a time"
-generated_text = hugging_face_model.generate(prompt)
+generated_text = hugging_face_model.run(prompt)
 print(generated_text)
 ```
 
-### 3. Google PaLM (swarms.agents.models.GooglePalm)
-
-The GooglePalm class provides an interface for Google's PaLM Chat API.
-
-**Constructor:**
-```python
-GooglePalm(model_name: str = "models/chat-bison-001", google_api_key: str = None, temperature: float = None, top_p: float = None, top_k: int = None, n: int = 1)
-```
-
-**Attributes:**
-
-- `model_name` (str): Name of the Google PaLM model.
-
-- `google_api_key` (str, optional): Google API key.
-
-- `temperature` (float, optional): Temperature for text generation.
-
-- `top_p` (float, optional): Top-p sampling value.
-
-- `top_k` (int, optional): Top-k sampling value.
-
-- `n` (int, default=1): Number of candidate completions.
-
-**Methods:**
-
-- `generate(messages: List[Dict[str, Any]], stop: List[str] = None, **kwargs) -> Dict[str, Any]`: Generate text based on a list of messages.
-
-- `__call__(messages: List[Dict[str, Any]], stop: List[str] = None, **kwargs) -> Dict[str, Any]`: Generate text using the call syntax.
-
-**Usage Example:**
-```python
-from swarm_models import GooglePalm
-
-google_palm = GooglePalm()
-messages = [
-    {"role": "system", "content": "You are a helpful assistant"},
-    {"role": "user", "content": "Tell me a joke"},
-]
-
-response = google_palm.generate(messages)
-print(response["choices"][0]["text"])
-```
-
-### 4. Anthropic (swarms.agents.models.Anthropic)
+### 3. Anthropic (swarm_models.Anthropic)
 
 The Anthropic class enables interaction with Anthropic's large language models.
 
@@ -171,7 +125,7 @@ Anthropic(model: str = "claude-2", max_tokens_to_sample: int = 256, temperature:
 
 **Methods:**
 
-- `generate(prompt: str, stop: List[str] = None) -> str`: Generate text based on a prompt.
+- `run(prompt: str, stop: List[str] = None) -> str`: Generate text based on a prompt.
 
 **Usage Example:**
 ```python
@@ -179,7 +133,7 @@ from swarm_models import Anthropic
 
 anthropic = Anthropic()
 prompt = "Once upon a time"
-generated_text = anthropic.generate(prompt)
+generated_text = anthropic.run(prompt)
 print(generated_text)
 ```
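After this patch, every documented model class is imported from `swarm_models` and is called through the same `run()` method. Below is a minimal sketch of that unified interface, working only from the constructors and `run()` signatures documented in the patch above; it assumes valid provider credentials, and the combined import, loop, and prompt are illustrative rather than part of the patch.

```python
# Minimal sketch of the post-patch interface: each class comes from
# swarm_models and generates text via run(). Assumes the relevant
# provider API keys are available (key value here is a placeholder).
from swarm_models import Anthropic, HuggingFaceLLM, OpenAI

models = [
    OpenAI(api_key="YOUR_OPENAI_API_KEY"),  # hosted OpenAI models
    Anthropic(),                            # defaults to claude-2 per the docs
    HuggingFaceLLM(model_id="gpt2"),        # local Hugging Face checkpoint
]

prompt = "Once upon a time"
for model in models:
    # After the rename from generate() to run(), every class answers
    # the same call, so providers can be swapped without code changes.
    print(type(model).__name__, model.run(prompt))
```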