From fe5a3180c19c0d5f2e3d729f0987be0df6b1651c Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 25 Apr 2024 00:14:01 -0400 Subject: [PATCH] [DOCS] --- docs/swarms/memory/diy_memory.md | 46 ++++++++- docs/swarms/models/vllm.md | 157 ------------------------------- 2 files changed, 44 insertions(+), 159 deletions(-) delete mode 100644 docs/swarms/models/vllm.md diff --git a/docs/swarms/memory/diy_memory.md b/docs/swarms/memory/diy_memory.md index 860b4115..3c550a29 100644 --- a/docs/swarms/memory/diy_memory.md +++ b/docs/swarms/memory/diy_memory.md @@ -1,5 +1,3 @@ - - # Building Custom Vector Memory Databases with the AbstractVectorDatabase Class In the age of large language models (LLMs) and AI-powered applications, efficient memory management has become a crucial component. Vector databases, which store and retrieve data in high-dimensional vector spaces, have emerged as powerful tools for handling the vast amounts of data generated and consumed by AI systems. However, integrating vector databases into your applications can be a daunting task, requiring in-depth knowledge of their underlying architectures and APIs. @@ -365,6 +363,50 @@ class FAISSVectorDatabase(MyCustomVectorDatabase): ``` +Now, how do you integrate a vector datbase with an agent? 
This is how: 
+
+## Integrate Memory with `Agent`
+
+```python
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, OpenAIChat
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+
+# Initialize the FAISS vector database
+faiss = FAISSVectorDatabase()
+
+# Initialize the language model
+llm = OpenAIChat(
+    temperature=0.5,
+    model_name="gpt-4",
+    openai_api_key=api_key,
+    max_tokens=1000,
+)
+
+## Initialize the agent
+agent = Agent(
+    llm=llm,
+    max_loops=4,
+    autosave=True,
+    dashboard=True,
+    long_term_memory=faiss,
+)
+
+# Run the agent on a task
+out = agent.run("Generate a 10,000 word blog on health and wellness.")
+print(out)
+```
+
 In this example, we define a `FAISSVectorDatabase` class that inherits from `MyCustomVectorDatabase`. Within the `__init__` method, we create a FAISS index and set the index path. We then implement the `connect()`, `close()`, `query()`, and `add()` methods specific to the FAISS library, assuming 64-dimensional vectors for simplicity.
 
 These examples provide a starting point for integrating various vector database solutions into your custom implementation. Each solution has its own strengths, weaknesses, and trade-offs, so it's essential to carefully evaluate your requirements and choose the solution that best fits your needs. 
diff --git a/docs/swarms/models/vllm.md b/docs/swarms/models/vllm.md deleted file mode 100644 index 778c1b2b..00000000 --- a/docs/swarms/models/vllm.md +++ /dev/null @@ -1,157 +0,0 @@ -# `vLLM` Documentation - -## Table of Contents -- [Overview](#overview) -- [Installation](#installation) -- [vLLM Class](#vllm-class) - - [Initialization](#initialization) - - [Methods](#methods) - - [run](#run) -- [Usage Examples](#usage-examples) -- [Common Issues and Troubleshooting](#common-issues-and-troubleshooting) -- [References and Resources](#references-and-resources) - ---- - -### Overview - -Welcome to the documentation for the vLLM (Variable-Length Language Model) library. vLLM is a powerful tool for generating text using pre-trained language models. This documentation will provide a comprehensive guide on how to use vLLM effectively. - -#### Purpose - -vLLM is designed to simplify the process of generating text using language models, specifically the Facebook `opt-13b` model. It allows you to fine-tune various parameters to achieve the desired text generation outcomes. - -#### Key Features - -- Seamless integration with the Facebook `opt-13b` language model. -- Flexible configuration options for model parameters. -- Support for generating text for various natural language processing tasks. - -### Installation - -Before using vLLM, you need to install swarms. You can install vLLM using `pip`: - -```bash -pip install swarms vllm -``` - -### vLLM Class - -The vLLM class is the core component of the vLLM library. It provides a high-level interface for generating text using the Facebook `opt-13b` language model. - -#### Initialization - -To initialize the vLLM class, you can use the following parameters: - -- `model_name` (str, optional): The name of the language model to use. Defaults to "facebook/opt-13b". -- `tensor_parallel_size` (int, optional): The size of the tensor parallelism. Defaults to 4. -- `trust_remote_code` (bool, optional): Whether to trust remote code. 
Defaults to False. -- `revision` (str, optional): The revision of the language model. Defaults to None. -- `temperature` (float, optional): The temperature parameter for text generation. Defaults to 0.5. -- `top_p` (float, optional): The top-p parameter for text generation. Defaults to 0.95. - -```python -from swarms.models import vLLM - -# Initialize vLLM with default parameters -vllm = vLLM() - -# Initialize vLLM with custom parameters -custom_vllm = vLLM( - model_name="custom/model", - tensor_parallel_size=8, - trust_remote_code=True, - revision="abc123", - temperature=0.7, - top_p=0.8, -) -``` - -#### Methods - -##### run - -The `run` method is used to generate text using the vLLM model. It takes a `task` parameter, which is a text prompt or description of the task you want the model to perform. It returns the generated text as a string. - -```python -# Generate text using vLLM -result = vllm.run("Generate a creative story about a dragon.") -print(result) -``` - -### Usage Examples - -Here are three usage examples demonstrating different ways to use vLLM: - -**Example 1: Basic Text Generation** - -```python -from swarms.models import vLLM - -# Initialize vLLM -vllm = vLLM() - -# Generate text for a given task -generated_text = vllm.run("Generate a summary of a scientific paper.") -print(generated_text) -``` - -**Example 2: Custom Model and Parameters** - -```python -from swarms.models import vLLM - -# Initialize vLLM with custom model and parameters -custom_vllm = vLLM( - model_name="custom/model", - tensor_parallel_size=8, - trust_remote_code=True, - revision="abc123", - temperature=0.7, - top_p=0.8, -) - -# Generate text with custom configuration -generated_text = custom_vllm.run("Create a poem about nature.") -print(generated_text) -``` - -**Example 3: Batch Processing** - -```python -from swarms.models import vLLM - -# Initialize vLLM -vllm = vLLM() - -# Generate multiple texts in batch -tasks = [ - "Translate the following sentence to French: 'Hello, 
world!'", - "Write a short story set in a futuristic world.", - "Summarize the main points of a news article about climate change.", -] - -for task in tasks: - generated_text = vllm.run(task) - print(generated_text) -``` - -### Common Issues and Troubleshooting - -- **ImportError**: If you encounter an `ImportError` related to vLLM, make sure you have installed it using `pip install vllm`. - -- **Model Configuration**: Ensure that you provide valid model names and configurations when initializing vLLM. Invalid model names or parameters can lead to errors. - -- **Text Generation**: Be cautious with text generation parameters like `temperature` and `top_p`. Experiment with different values to achieve the desired text quality. - -### References and Resources - -For more information and resources related to vLLM and language models, refer to the following: - -- [vLLM GitHub Repository](https://github.com/vllm/vllm) -- [Hugging Face Transformers Documentation](https://huggingface.co/transformers/) -- [Facebook `opt-13b` Model Documentation](https://huggingface.co/facebook/opt-13b) - ---- - -This concludes the documentation for the vLLM library. We hope this guide helps you effectively use vLLM for text generation tasks. If you have any questions or encounter issues, please refer to the troubleshooting section or seek assistance from the vLLM community. Happy text generation! \ No newline at end of file