diff --git a/README.md b/README.md
index c3acd954..e720054b 100644
--- a/README.md
+++ b/README.md
@@ -118,179 +118,7 @@ agent.run("Generate a 10,000 word blog on health and wellness.")
 import os
 from dotenv import load_dotenv
 from swarms import Agent, OpenAIChat
-from playground.memory.chromadb_example import ChromaDB
-import logging
-import os
-import uuid
-from typing import Optional
-import chromadb
-from swarms.utils.data_to_text import data_to_text
-from swarms.utils.markdown_message import display_markdown_message
-from swarms.memory.base_vectordb import BaseVectorDatabase
-
-# Load environment variables
-load_dotenv()
-
-
-# Results storage using local ChromaDB
-class ChromaDB(BaseVectorDatabase):
-    """
-
-    ChromaDB database
-
-    Args:
-        metric (str): The similarity metric to use.
-        output (str): The name of the collection to store the results in.
-        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
-        n_results (int, optional): The number of results to retrieve. Defaults to 2.
-
-    Methods:
-        add: _description_
-        query: _description_
-
-    Examples:
-        >>> chromadb = ChromaDB(
-        >>>     metric="cosine",
-        >>>     output="results",
-        >>>     llm="gpt3",
-        >>>     openai_api_key=OPENAI_API_KEY,
-        >>> )
-        >>> chromadb.add(task, result, result_id)
-    """
-
-    def __init__(
-        self,
-        metric: str = "cosine",
-        output_dir: str = "swarms",
-        limit_tokens: Optional[int] = 1000,
-        n_results: int = 3,
-        docs_folder: str = None,
-        verbose: bool = False,
-        *args,
-        **kwargs,
-    ):
-        self.metric = metric
-        self.output_dir = output_dir
-        self.limit_tokens = limit_tokens
-        self.n_results = n_results
-        self.docs_folder = docs_folder
-        self.verbose = verbose
-
-        # Disable ChromaDB logging
-        if verbose:
-            logging.getLogger("chromadb").setLevel(logging.INFO)
-
-        # Create Chroma collection
-        chroma_persist_dir = "chroma"
-        chroma_client = chromadb.PersistentClient(
-            settings=chromadb.config.Settings(
-                persist_directory=chroma_persist_dir,
-            ),
-            *args,
-            **kwargs,
-        )
-
-        # Create ChromaDB client
-        self.client = chromadb.Client()
-
-        # Create Chroma collection
-        self.collection = chroma_client.get_or_create_collection(
-            name=output_dir,
-            metadata={"hnsw:space": metric},
-            *args,
-            **kwargs,
-        )
-        display_markdown_message(
-            "ChromaDB collection created:"
-            f" {self.collection.name} with metric: {self.metric} and"
-            f" output directory: {self.output_dir}"
-        )
-
-        # If docs
-        if docs_folder:
-            display_markdown_message(
-                f"Traversing directory: {docs_folder}"
-            )
-            self.traverse_directory()
-
-    def add(
-        self,
-        document: str,
-        *args,
-        **kwargs,
-    ):
-        """
-        Add a document to the ChromaDB collection.
-
-        Args:
-            document (str): The document to be added.
-            condition (bool, optional): The condition to check before adding the document. Defaults to True.
-
-        Returns:
-            str: The ID of the added document.
-        """
-        try:
-            doc_id = str(uuid.uuid4())
-            self.collection.add(
-                ids=[doc_id],
-                documents=[document],
-                *args,
-                **kwargs,
-            )
-            print("-----------------")
-            print("Document added successfully")
-            print("-----------------")
-            return doc_id
-        except Exception as e:
-            raise Exception(f"Failed to add document: {str(e)}")
-
-    def query(
-        self,
-        query_text: str,
-        *args,
-        **kwargs,
-    ):
-        """
-        Query documents from the ChromaDB collection.
-
-        Args:
-            query (str): The query string.
-            n_docs (int, optional): The number of documents to retrieve. Defaults to 1.
-
-        Returns:
-            dict: The retrieved documents.
-        """
-        try:
-            docs = self.collection.query(
-                query_texts=[query_text],
-                n_results=self.n_results,
-                *args,
-                **kwargs,
-            )["documents"]
-            return docs[0]
-        except Exception as e:
-            raise Exception(f"Failed to query documents: {str(e)}")
-
-    def traverse_directory(self):
-        """
-        Traverse through every file in the given directory and its subdirectories,
-        and return the paths of all files.
-        Parameters:
-        - directory_name (str): The name of the directory to traverse.
-        Returns:
-        - list: A list of paths to each file in the directory and its subdirectories.
-        """
-        added_to_db = False
-
-        for root, dirs, files in os.walk(self.docs_folder):
-            for file in files:
-                file_path = os.path.join(root, file)  # Change this line
-                _, ext = os.path.splitext(file_path)
-                data = data_to_text(file_path)
-                added_to_db = self.add(str(data))
-                print(f"{file_path} added to Database")
-
-        return added_to_db
+from swarms_memory import ChromaDB
 
 # Get the API key from the environment
 api_key = os.environ.get("OPENAI_API_KEY")
@@ -334,218 +162,108 @@ An LLM equipped with long term memory and tools, a full stack agent capable of a
 
 ```python
 import logging
-import os
-import uuid
-from typing import Optional
-
-import chromadb
 from dotenv import load_dotenv
-
-from swarms.utils.data_to_text import data_to_text
-from swarms.utils.markdown_message import display_markdown_message
-from swarms.memory.base_vectordb import BaseVectorDatabase
 from swarms import Agent, OpenAIChat
+import subprocess
 
+# Making an instance of the ChromaDB class
+memory = ChromaDB(
+    metric="cosine",
+    n_results=3,
+    output_dir="results",
+    docs_folder="docs",
+)
 
-# Load environment variables
-load_dotenv()
+# Tools
+def terminal(
+    code: str,
+):
+    """
+    Run code in the terminal.
+
+    Args:
+        code (str): The code to run in the terminal.
+
+    Returns:
+        str: The output of the code.
+    """
+    out = subprocess.run(
+        code, shell=True, capture_output=True, text=True
+    ).stdout
+    return str(out)
+
+def browser(query: str):
+    """
+    Search the query in the browser with the `browser` tool.
+
+    Args:
+        query (str): The query to search in the browser.
 
-# Results storage using local ChromaDB
-class ChromaDB(BaseVectorDatabase):
+    Returns:
+        str: The search results.
     """
+    import webbrowser
+
+    url = f"https://www.google.com/search?q={query}"
+    webbrowser.open(url)
+    return f"Searching for {query} in the browser."
 
-    ChromaDB database
+def create_file(file_path: str, content: str):
+    """
+    Create a file using the file editor tool.
 
     Args:
-        metric (str): The similarity metric to use.
-        output (str): The name of the collection to store the results in.
-        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
-        n_results (int, optional): The number of results to retrieve. Defaults to 2.
-
-    Methods:
-        add: _description_
-        query: _description_
-
-    Examples:
-        >>> chromadb = ChromaDB(
-        >>>     metric="cosine",
-        >>>     output="results",
-        >>>     llm="gpt3",
-        >>>     openai_api_key=OPENAI_API_KEY,
-        >>> )
-        >>> chromadb.add(task, result, result_id)
+        file_path (str): The path to the file.
+        content (str): The content to write to the file.
+
+    Returns:
+        str: The result of the file creation operation.
     """
+    with open(file_path, "w") as file:
+        file.write(content)
+    return f"File {file_path} created successfully."
 
-    def __init__(
-        self,
-        metric: str = "cosine",
-        output_dir: str = "swarms",
-        limit_tokens: Optional[int] = 1000,
-        n_results: int = 3,
-        docs_folder: str = None,
-        verbose: bool = False,
-        *args,
-        **kwargs,
-    ):
-        self.metric = metric
-        self.output_dir = output_dir
-        self.limit_tokens = limit_tokens
-        self.n_results = n_results
-        self.docs_folder = docs_folder
-        self.verbose = verbose
-
-        # Disable ChromaDB logging
-        if verbose:
-            logging.getLogger("chromadb").setLevel(logging.INFO)
-
-        # Create Chroma collection
-        chroma_persist_dir = "chroma"
-        chroma_client = chromadb.PersistentClient(
-            settings=chromadb.config.Settings(
-                persist_directory=chroma_persist_dir,
-            ),
-            *args,
-            **kwargs,
-        )
-
-        # Create ChromaDB client
-        self.client = chromadb.Client()
-
-        # Create Chroma collection
-        self.collection = chroma_client.get_or_create_collection(
-            name=output_dir,
-            metadata={"hnsw:space": metric},
-            *args,
-            **kwargs,
-        )
-        display_markdown_message(
-            "ChromaDB collection created:"
-            f" {self.collection.name} with metric: {self.metric} and"
-            f" output directory: {self.output_dir}"
-        )
-
-        # If docs
-        if docs_folder:
-            display_markdown_message(
-                f"Traversing directory: {docs_folder}"
-            )
-            self.traverse_directory()
-
-    def add(
-        self,
-        document: str,
-        *args,
-        **kwargs,
-    ):
-        """
-        Add a document to the ChromaDB collection.
-
-        Args:
-            document (str): The document to be added.
-            condition (bool, optional): The condition to check before adding the document. Defaults to True.
-
-        Returns:
-            str: The ID of the added document.
-        """
-        try:
-            doc_id = str(uuid.uuid4())
-            self.collection.add(
-                ids=[doc_id],
-                documents=[document],
-                *args,
-                **kwargs,
-            )
-            print("-----------------")
-            print("Document added successfully")
-            print("-----------------")
-            return doc_id
-        except Exception as e:
-            raise Exception(f"Failed to add document: {str(e)}")
-
-    def query(
-        self,
-        query_text: str,
-        *args,
-        **kwargs,
-    ):
-        """
-        Query documents from the ChromaDB collection.
-
-        Args:
-            query (str): The query string.
-            n_docs (int, optional): The number of documents to retrieve. Defaults to 1.
-
-        Returns:
-            dict: The retrieved documents.
-        """
-        try:
-            docs = self.collection.query(
-                query_texts=[query_text],
-                n_results=self.n_results,
-                *args,
-                **kwargs,
-            )["documents"]
-            return docs[0]
-        except Exception as e:
-            raise Exception(f"Failed to query documents: {str(e)}")
-
-    def traverse_directory(self):
-        """
-        Traverse through every file in the given directory and its subdirectories,
-        and return the paths of all files.
-        Parameters:
-        - directory_name (str): The name of the directory to traverse.
-        Returns:
-        - list: A list of paths to each file in the directory and its subdirectories.
-        """
-        added_to_db = False
-
-        for root, dirs, files in os.walk(self.docs_folder):
-            for file in files:
-                file_path = os.path.join(root, file)  # Change this line
-                _, ext = os.path.splitext(file_path)
-                data = data_to_text(file_path)
-                added_to_db = self.add(str(data))
-                print(f"{file_path} added to Database")
-
-        return added_to_db
 
+def file_editor(file_path: str, mode: str, content: str):
+    """
+    Edit a file using the file editor tool.
 
+    Args:
+        file_path (str): The path to the file.
+        mode (str): The mode to open the file in.
+        content (str): The content to write to the file.
 
-# Making an instance of the ChromaDB class
-memory = ChromaDB(
-    metric="cosine",
-    n_results=3,
-    output_dir="results",
-    docs_folder="docs",
-)
 
+    Returns:
+        str: The result of the file editing operation.
+    """
+    with open(file_path, mode) as file:
+        file.write(content)
+    return f"File {file_path} edited successfully."
 
-# Initialize a tool
-def search_api(query: str):
-    # Add your logic here
-    return query
 
-# Initializing the agent with the Gemini instance and other parameters
+# Agent
 agent = Agent(
-    agent_name="Covid-19-Chat",
-    agent_description=(
-        "This agent provides information about COVID-19 symptoms."
+    agent_name="Devin",
+    system_prompt=(
+        "Autonomous agent that can interact with humans and other"
+        " agents. Be Helpful and Kind. Use the tools provided to"
+        " assist the user. Return all code in markdown format."
     ),
-    llm=OpenAIChat(),
+    llm=llm,
     max_loops="auto",
     autosave=True,
+    dashboard=False,
+    streaming_on=True,
     verbose=True,
+    stopping_token="",
+    interactive=True,
+    tools=[terminal, browser, file_editor, create_file],
+    code_interpreter=True,
+    # streaming=True,
     long_term_memory=memory,
-    stopping_condition="finish",
-    tools=[search_api],
 )
 
-# Defining the task and image path
-task = ("What are the symptoms of COVID-19?",)
-
-# Running the agent with the specified task and image
-out = agent.run(task)
+# Run the agent
+out = agent("Create a new file for a plan to take over the world.")
 print(out)
 ```
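The two README hunks above swap the hand-rolled ChromaDB class for the packaged `swarms_memory.ChromaDB`. For reference, a minimal self-contained version of the resulting long-term-memory agent is sketched below; it assumes the packaged class accepts the same constructor arguments (`metric`, `output_dir`, `n_results`, `docs_folder`) shown in the hunk, and it constructs the `llm` instance explicitly since the updated snippet refers to `llm` without defining it.

```python
# Sketch only: assumes swarms-memory ships a ChromaDB class with the same
# constructor arguments as the inline class removed above.
import os

from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
from swarms_memory import ChromaDB

load_dotenv()

# Long-term memory backed by a local Chroma collection
memory = ChromaDB(
    metric="cosine",
    output_dir="results",
    n_results=3,
    docs_folder="docs",
)

# Define the LLM explicitly (the README hunk passes `llm=llm` without creating it)
llm = OpenAIChat(
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
    max_tokens=4000,
)

agent = Agent(
    agent_name="Devin",
    llm=llm,
    max_loops="auto",
    autosave=True,
    verbose=True,
    long_term_memory=memory,
)

out = agent.run("Summarize what is stored in long-term memory about health and wellness.")
print(out)
```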
diff --git a/docs/swarms/structs/index.md b/docs/swarms/structs/index.md
index b4ab01c3..ca5c9111 100644
--- a/docs/swarms/structs/index.md
+++ b/docs/swarms/structs/index.md
@@ -72,7 +72,7 @@ agent.run("Generate a 10,000 word blog on health and wellness.")
 
 ```python
 from swarms import Agent, OpenAIChat
-from playground.memory.chromadb_example import ChromaDB # Copy and paste the code and put it in your own local directory.
+from swarms_memory import ChromaDB # Copy and paste the code and put it in your own local directory.
 
 # Making an instance of the ChromaDB class
 memory = ChromaDB(
diff --git a/playground/agents/agent_with_long_term_memory.py b/playground/agents/agent_with_long_term_memory.py
index 3a07f246..d8fc2861 100644
--- a/playground/agents/agent_with_long_term_memory.py
+++ b/playground/agents/agent_with_long_term_memory.py
@@ -1,5 +1,5 @@
 from swarms import Agent, OpenAIChat
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 from swarms.models.tiktoken_wrapper import TikTokenizer
 
 # Initialize the agent
diff --git a/playground/agents/agent_with_longterm_memory.py b/playground/agents/agent_with_longterm_memory.py
index dc73b8c1..36e32081 100644
--- a/playground/agents/agent_with_longterm_memory.py
+++ b/playground/agents/agent_with_longterm_memory.py
@@ -4,7 +4,7 @@ from dotenv import load_dotenv
 
 # Import the OpenAIChat model and the Agent struct
 from swarms import Agent, OpenAIChat
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 
 # Load the environment variables
 load_dotenv()
diff --git a/playground/agents/new_perplexity_agent.py b/playground/agents/new_perplexity_agent.py
index 5e2032bd..272041de 100644
--- a/playground/agents/new_perplexity_agent.py
+++ b/playground/agents/new_perplexity_agent.py
@@ -1,6 +1,6 @@
 from swarms import Agent
 from swarms.models.llama3_hosted import llama3Hosted
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
 
 # Define the research system prompt
diff --git a/playground/agents/perplexity_agent.py b/playground/agents/perplexity_agent.py
index 6390a873..0faab2cf 100644
--- a/playground/agents/perplexity_agent.py
+++ b/playground/agents/perplexity_agent.py
@@ -10,7 +10,7 @@ $ pip install swarms
 """
 
 from swarms import Agent, OpenAIChat
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
 import os
 from dotenv import load_dotenv
diff --git a/playground/demos/patient_question_assist/main.py b/playground/demos/patient_question_assist/main.py
index 1c3d7133..45b31cb4 100644
--- a/playground/demos/patient_question_assist/main.py
+++ b/playground/demos/patient_question_assist/main.py
@@ -1,6 +1,6 @@
 from swarms import Agent, OpenAIChat
 from typing import List
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 
 memory = ChromaDB(
     metric="cosine",
diff --git a/playground/demos/swarm_mechanic/swarm_mechanic_example.py b/playground/demos/swarm_mechanic/swarm_mechanic_example.py
index 9fa2104d..5875c2e8 100644
--- a/playground/demos/swarm_mechanic/swarm_mechanic_example.py
+++ b/playground/demos/swarm_mechanic/swarm_mechanic_example.py
@@ -15,7 +15,7 @@ task -> Understanding Agent [understands the problem better] -> Summarize of the
 
 from swarms import Agent, llama3Hosted, AgentRearrange
 from pydantic import BaseModel
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 
 # Initialize the language model agent (e.g., GPT-3)
 llm = llama3Hosted(max_tokens=3000)
diff --git a/playground/demos/swarm_of_complaince/compliance_swarm.py b/playground/demos/swarm_of_complaince/compliance_swarm.py
index 62f296a2..63cee018 100644
--- a/playground/demos/swarm_of_complaince/compliance_swarm.py
+++ b/playground/demos/swarm_of_complaince/compliance_swarm.py
@@ -11,7 +11,7 @@ Todo [Improvements]
 
 from swarms import Agent
 from swarms.models.llama3_hosted import llama3Hosted
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 
 
 # Model
diff --git a/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py b/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py
index 5b7df470..2274f956 100644
--- a/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py
+++ b/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py
@@ -1,6 +1,6 @@
 from swarms import Agent, OpenAIChat
 from swarms.structs.mixture_of_agents import MixtureOfAgents
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 
 
 SEC_DATA = """
diff --git a/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py b/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py
index 1596bfff..e61d1536 100644
--- a/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py
+++ b/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py
@@ -1,6 +1,6 @@
 from swarms import Agent, OpenAIChat
 from swarms.structs.mixture_of_agents import MixtureOfAgents
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 
 
 SEC_DATA = """
diff --git a/playground/structs/multi_agent_collaboration/swarm_network_example.py b/playground/structs/multi_agent_collaboration/swarm_network_example.py
index 69cbe0ef..d0f01a3e 100644
--- a/playground/structs/multi_agent_collaboration/swarm_network_example.py
+++ b/playground/structs/multi_agent_collaboration/swarm_network_example.py
@@ -6,7 +6,7 @@ from swarms import (
     OpenAIChat,
     TogetherLLM,
 )
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 from dotenv import load_dotenv
 
 # load the environment variables
diff --git a/playground/swarms/movers_swarm.py b/playground/swarms/movers_swarm.py
index 7600ec64..c4625876 100644
--- a/playground/swarms/movers_swarm.py
+++ b/playground/swarms/movers_swarm.py
@@ -10,7 +10,7 @@ $ pip install swarms
 """
 
 from swarms import Agent, OpenAIChat
-from playground.memory.chromadb_example import ChromaDB
+from swarms_memory import ChromaDB
 from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
 import os
 from dotenv import load_dotenv
diff --git a/playground/swarms_example.ipynb b/playground/swarms_example.ipynb
index ece6101d..83951984 100644
--- a/playground/swarms_example.ipynb
+++ b/playground/swarms_example.ipynb
@@ -96,7 +96,7 @@
    "outputs": [],
    "source": [
     "from swarms import Agent, OpenAIChat\n",
-    "from playground.memory.chromadb_example import ChromaDB\n",
+    "from swarms_memory import ChromaDB\n",
     "\n",
     "# Making an instance of the ChromaDB class\n",
     "memory = ChromaDB(\n",
diff --git a/pyproject.toml b/pyproject.toml
index ec96d346..81b0f860 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "5.3.2"
+version = "5.3.3"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez "]
@@ -55,6 +55,7 @@ openai = ">=1.30.1,<2.0"
 termcolor = "*"
 tiktoken = "*"
 networkx = "*"
+swarms-memory = "*"
diff --git a/requirements.txt b/requirements.txt
index 4273cc06..22ba41f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -28,4 +28,5 @@ pytest>=8.1.1
 termcolor>=2.4.0
 pandas>=2.2.2
 fastapi>=0.110.1
-networkx
\ No newline at end of file
+networkx
+swarms-memory
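With `swarms-memory` now declared in both `pyproject.toml` and `requirements.txt`, the import swap across the playground examples only requires the package to be installed. A quick smoke test of the dependency could look like the following sketch, under the assumption that the packaged `ChromaDB` keeps the `add`/`query` interface of the inline class it replaces:

```python
# Smoke test for the new swarms-memory dependency.
# Assumption: the packaged ChromaDB mirrors the removed inline class, where
# add() returns the id of the stored document and query() returns matching documents.
from swarms_memory import ChromaDB

memory = ChromaDB(metric="cosine", output_dir="results", n_results=2)

doc_id = memory.add("Chroma-backed long-term memory for swarms agents.")
print(f"Stored document {doc_id}")

print(memory.query("long-term memory"))
```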
b/playground/agents/perplexity_agent.py @@ -10,7 +10,7 @@ $ pip install swarms """ from swarms import Agent, OpenAIChat -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api import os from dotenv import load_dotenv diff --git a/playground/demos/patient_question_assist/main.py b/playground/demos/patient_question_assist/main.py index 1c3d7133..45b31cb4 100644 --- a/playground/demos/patient_question_assist/main.py +++ b/playground/demos/patient_question_assist/main.py @@ -1,6 +1,6 @@ from swarms import Agent, OpenAIChat from typing import List -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB memory = ChromaDB( metric="cosine", diff --git a/playground/demos/swarm_mechanic/swarm_mechanic_example.py b/playground/demos/swarm_mechanic/swarm_mechanic_example.py index 9fa2104d..5875c2e8 100644 --- a/playground/demos/swarm_mechanic/swarm_mechanic_example.py +++ b/playground/demos/swarm_mechanic/swarm_mechanic_example.py @@ -15,7 +15,7 @@ task -> Understanding Agent [understands the problem better] -> Summarize of the from swarms import Agent, llama3Hosted, AgentRearrange from pydantic import BaseModel -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB # Initialize the language model agent (e.g., GPT-3) llm = llama3Hosted(max_tokens=3000) diff --git a/playground/demos/swarm_of_complaince/compliance_swarm.py b/playground/demos/swarm_of_complaince/compliance_swarm.py index 62f296a2..63cee018 100644 --- a/playground/demos/swarm_of_complaince/compliance_swarm.py +++ b/playground/demos/swarm_of_complaince/compliance_swarm.py @@ -11,7 +11,7 @@ Todo [Improvements] from swarms import Agent from swarms.models.llama3_hosted import llama3Hosted -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB # Model diff --git a/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py b/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py index 5b7df470..2274f956 100644 --- a/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py +++ b/playground/structs/multi_agent_collaboration/mixture_of_agents/agent_ops_moa.py @@ -1,6 +1,6 @@ from swarms import Agent, OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB SEC_DATA = """ diff --git a/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py b/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py index 1596bfff..e61d1536 100644 --- a/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py +++ b/playground/structs/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py @@ -1,6 +1,6 @@ from swarms import Agent, OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB SEC_DATA = """ diff --git a/playground/structs/multi_agent_collaboration/swarm_network_example.py b/playground/structs/multi_agent_collaboration/swarm_network_example.py index 69cbe0ef..d0f01a3e 100644 --- a/playground/structs/multi_agent_collaboration/swarm_network_example.py +++ b/playground/structs/multi_agent_collaboration/swarm_network_example.py @@ -6,7 +6,7 @@ from swarms import ( OpenAIChat, TogetherLLM, ) -from 
playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB from dotenv import load_dotenv # load the environment variables diff --git a/playground/swarms/movers_swarm.py b/playground/swarms/movers_swarm.py index 7600ec64..c4625876 100644 --- a/playground/swarms/movers_swarm.py +++ b/playground/swarms/movers_swarm.py @@ -10,7 +10,7 @@ $ pip install swarms """ from swarms import Agent, OpenAIChat -from playground.memory.chromadb_example import ChromaDB +from swarms_memory import ChromaDB from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api import os from dotenv import load_dotenv diff --git a/playground/swarms_example.ipynb b/playground/swarms_example.ipynb index ece6101d..83951984 100644 --- a/playground/swarms_example.ipynb +++ b/playground/swarms_example.ipynb @@ -96,7 +96,7 @@ "outputs": [], "source": [ "from swarms import Agent, OpenAIChat\n", - "from playground.memory.chromadb_example import ChromaDB\n", + "from swarms_memory import ChromaDB\n", "\n", "# Making an instance of the ChromaDB class\n", "memory = ChromaDB(\n", diff --git a/pyproject.toml b/pyproject.toml index ec96d346..81b0f860 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "5.3.2" +version = "5.3.3" description = "Swarms - Pytorch" license = "MIT" authors = ["Kye Gomez "] @@ -55,6 +55,7 @@ openai = ">=1.30.1,<2.0" termcolor = "*" tiktoken = "*" networkx = "*" +swarms-memory = "*" diff --git a/requirements.txt b/requirements.txt index 4273cc06..22ba41f8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,4 +28,5 @@ pytest>=8.1.1 termcolor>=2.4.0 pandas>=2.2.2 fastapi>=0.110.1 -networkx \ No newline at end of file +networkx +swarms-memory diff --git a/json_log_cleanup.py b/scripts/cleanup/json_log_cleanup.py similarity index 100% rename from json_log_cleanup.py rename to scripts/cleanup/json_log_cleanup.py