diff --git a/README.md b/README.md
index 3259d55b..4361355f 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,7 @@ import os
 from dotenv import load_dotenv
 
 # Import the OpenAIChat model and the Agent struct
-from swarms import OpenAIChat, Agent
+from swarms import Agent, OpenAIChat
 
 # Load the environment variables
 load_dotenv()
@@ -54,10 +54,7 @@ api_key = os.environ.get("OPENAI_API_KEY")
 
 # Initialize the language model
 llm = OpenAIChat(
-    temperature=0.5,
-    model_name="gpt-4",
-    openai_api_key=api_key,
-    max_tokens=4000
+    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
 )
 
 
@@ -66,9 +63,6 @@ agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
 
 # Run the workflow on a task
 agent.run("Generate a 10,000 word blog on health and wellness.")
-
-
-
 ```
 
@@ -79,6 +73,7 @@ ToolAgent is an agent that outputs JSON using any model from huggingface. It tak
 ```python
 # Import necessary libraries
 from transformers import AutoModelForCausalLM, AutoTokenizer
+
 from swarms import ToolAgent
 
 # Load the pre-trained model and tokenizer
@@ -107,8 +102,6 @@ generated_data = agent.run(task)
 
 # Print the generated data
 print(generated_data)
-
-
 ```
 
@@ -124,8 +117,10 @@ The `Worker` is a simple all-in-one agent equipped with an LLM, tools, and RAG f
 ```python
 # Importing necessary modules
 import os
+
 from dotenv import load_dotenv
-from swarms import Worker, OpenAIChat, tool
+
+from swarms import OpenAIChat, Worker, tool
 
 # Loading environment variables from .env file
 load_dotenv()
@@ -151,14 +146,10 @@ worker = Worker(
 )
 
 # Running the worker with a prompt
-out = worker.run(
-    "Hello, how are you? Create an image of how your are doing!"
-)
+out = worker.run("Hello, how are you? Create an image of how you are doing!")
 
 # Printing the output
 print(out)
-
-
 ```
 
 ------
@@ -174,10 +165,12 @@ Sequential Workflow enables you to sequentially execute tasks with `Agent` and t
 ✅ Utilizes Agent class
 
 ```python
-import os
-from swarms import OpenAIChat, Agent, SequentialWorkflow
+import os
+
 from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat, SequentialWorkflow
+
 load_dotenv()
 
 # Load the environment variables
@@ -186,10 +179,7 @@ api_key = os.getenv("OPENAI_API_KEY")
 
 # Initialize the language agent
 llm = OpenAIChat(
-    temperature=0.5,
-    model_name="gpt-4",
-    openai_api_key=api_key,
-    max_tokens=4000
+    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
 )
 
 
@@ -207,12 +197,14 @@ workflow = SequentialWorkflow(max_loops=1)
 
 # Add tasks to the workflow
 workflow.add(
-    agent1, "Generate a 10,000 word blog on health and wellness.",
+    agent1,
+    "Generate a 10,000 word blog on health and wellness.",
 )
 
 # Suppose the next task takes the output of the first task as input
 workflow.add(
-    agent2, "Summarize the generated blog",
+    agent2,
+    "Summarize the generated blog",
 )
 
 # Run the workflow
@@ -231,8 +223,10 @@ for task in workflow.tasks:
 
 ```python
 import os
+
 from dotenv import load_dotenv
-from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent
+
+from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task
 
 # Load environment variables from .env file
 load_dotenv()
@@ -254,16 +248,17 @@ workflow.add(tasks=[task1, task2, task3])
 
 # Run the workflow
 workflow.run()
-
 ```
 
 ### `RecursiveWorkflow`
 `RecursiveWorkflow` will keep executing the tasks until a specific token like `<DONE>` is located inside the text!
```python -import os -from dotenv import load_dotenv -from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent +import os + +from dotenv import load_dotenv + +from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task # Load environment variables from .env file load_dotenv() @@ -287,8 +282,6 @@ workflow.add(task3) # Run the workflow workflow.run() - - ``` @@ -304,7 +297,7 @@ import os from dotenv import load_dotenv -from swarms import Anthropic, Gemini, Mixtral, OpenAIChat, ModelParallelizer +from swarms import Anthropic, Gemini, Mixtral, ModelParallelizer, OpenAIChat load_dotenv() @@ -346,10 +339,7 @@ import os from dotenv import load_dotenv -from swarms import ( - OpenAIChat, - Conversation, -) +from swarms import Conversation, OpenAIChat conv = Conversation( time_enabled=True, @@ -364,6 +354,7 @@ api_key = os.environ.get("OPENAI_API_KEY") # Initialize the language model llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4") + # Run the language model in a loop def interactive_conversation(llm): conv = Conversation() @@ -372,9 +363,7 @@ def interactive_conversation(llm): conv.add("user", user_input) if user_input.lower() == "quit": break - task = ( - conv.return_history_as_string() - ) # Get the conversation history + task = conv.return_history_as_string() # Get the conversation history out = llm(task) conv.add("assistant", out) print( @@ -386,7 +375,6 @@ def interactive_conversation(llm): # Replace with your LLM instance interactive_conversation(llm) - ``` @@ -405,7 +393,7 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import OpenAIChat, Agent, SwarmNetwork +from swarms import Agent, OpenAIChat, SwarmNetwork # Load the environment variables load_dotenv() @@ -442,11 +430,8 @@ print(out) # Run all the agents in the swarm network on a task -out = swarmnet.run_many_agents( - "Generate a 10,000 word blog on health and wellness." 
-) +out = swarmnet.run_many_agents("Generate a 10,000 word blog on health and wellness.") print(out) - ``` @@ -513,8 +498,6 @@ else: # Output the result of the task print(f"Task result: {task.result}") - - ``` --- @@ -535,14 +518,7 @@ from dotenv import load_dotenv from transformers import AutoModelForCausalLM, AutoTokenizer # Import the models, structs, and telemetry modules -from swarms import ( - Gemini, - GPT4VisionAPI, - Mixtral, - OpenAI, - ToolAgent, - BlocksList, -) +from swarms import BlocksList, Gemini, GPT4VisionAPI, Mixtral, OpenAI, ToolAgent # Load the environment variables load_dotenv() @@ -552,9 +528,7 @@ openai_api_key = os.getenv("OPENAI_API_KEY") gemini_api_key = os.getenv("GEMINI_API_KEY") # Tool Agent -model = AutoModelForCausalLM.from_pretrained( - "databricks/dolly-v2-12b" -) +model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b") tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b") json_schema = { "type": "object", @@ -565,9 +539,7 @@ json_schema = { "courses": {"type": "array", "items": {"type": "string"}}, }, } -toolagent = ToolAgent( - model=model, tokenizer=tokenizer, json_schema=json_schema -) +toolagent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema) # Blocks List which enables you to build custom swarms by adding classes or functions swarm = BlocksList( @@ -619,9 +591,7 @@ blocks_by_parent_name = swarm.get_by_parent_name(swarm.name) blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__) # Get blocks by parent description -blocks_by_parent_description = swarm.get_by_parent_description( - swarm.description -) +blocks_by_parent_description = swarm.get_by_parent_description(swarm.description) # Run the block in the swarm inference = swarm.run_block(toolagent, "Hello World") @@ -636,25 +606,27 @@ Here's a production grade swarm ready for real-world deployment in a factory and ```python -from swarms.structs import Agent import os + from dotenv import load_dotenv + from swarms.models import GPT4VisionAPI from swarms.prompts.logistics import ( + Efficiency_Agent_Prompt, Health_Security_Agent_Prompt, - Quality_Control_Agent_Prompt, Productivity_Agent_Prompt, + Quality_Control_Agent_Prompt, Safety_Agent_Prompt, Security_Agent_Prompt, Sustainability_Agent_Prompt, - Efficiency_Agent_Prompt, ) +from swarms.structs import Agent # Load ENV load_dotenv() api_key = os.getenv("OPENAI_API_KEY") -# GPT4VisionAPI +# GPT4VisionAPI llm = GPT4VisionAPI(openai_api_key=api_key) # Image for analysis @@ -686,9 +658,7 @@ productivity_agent = Agent( ) # Initiailize safety agent -safety_agent = Agent( - llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True -) +safety_agent = Agent(llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True) # Init the security agent security_agent = Agent( @@ -748,7 +718,9 @@ Run the agent with multiple modalities useful for various real-world tasks in ma ```python # Description: This is an example of how to use the Agent class to run a multi-modal workflow import os + from dotenv import load_dotenv + from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.structs import Agent @@ -775,17 +747,11 @@ img = "assembly_line.jpg" ## Initialize the workflow agent = Agent( - llm=llm, - max_loops="auto", - autosave=True, - dashboard=True, - multi_modal=True + llm=llm, max_loops="auto", autosave=True, dashboard=True, multi_modal=True ) # Run the workflow on a task agent.run(task=task, img=img) - - ``` --- @@ -857,14 +823,10 @@ model = QwenVLMultiModal( ) # Run the model 
-response = model( - "Hello, how are you?", "https://example.com/image.jpg" -) +response = model("Hello, how are you?", "https://example.com/image.jpg") # Print the response print(response) - - ``` @@ -882,7 +844,6 @@ out = model.run("Analyze the reciepts in this image", "docs.jpg") # Print the output print(out) - ``` @@ -923,8 +884,6 @@ model.set_max_length(200) # Clear the chat history of the model model.clear_chat_history() - - ``` ## Radically Simple AI Model APIs @@ -941,9 +900,7 @@ We provide a vast array of language and multi-modal model APIs for you to genera from swarms.models import Anthropic # Initialize an instance of the Anthropic class -model = Anthropic( - anthropic_api_key="" -) +model = Anthropic(anthropic_api_key="") # Using the run method completion_1 = model.run("What is the capital of France?") @@ -952,7 +909,6 @@ print(completion_1) # Using the __call__ method completion_2 = model("How far is the moon from the earth?", stop=["miles", "km"]) print(completion_2) - ``` @@ -964,12 +920,16 @@ from swarms.models import HuggingfaceLLM custom_config = { "quantize": True, "quantization_config": {"load_in_4bit": True}, - "verbose": True + "verbose": True, } -inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config) +inference = HuggingfaceLLM( + model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config +) # Generate text based on a prompt -prompt_text = "Create a list of known biggest risks of structural collapse with references" +prompt_text = ( + "Create a list of known biggest risks of structural collapse with references" +) generated_text = inference(prompt_text) print(generated_text) ``` @@ -1027,7 +987,6 @@ task = "A person is walking on the street." # Generate the video! video_path = zeroscope(task) print(video_path) - ``` diff --git a/docs/applications/discord.md b/docs/applications/discord.md index cae3c8c1..dd7de16c 100644 --- a/docs/applications/discord.md +++ b/docs/applications/discord.md @@ -64,6 +64,7 @@ Initialize the `llm` (Language Learning Model) with your OpenAI API key: ```python from swarms.models import OpenAIChat + llm = OpenAIChat( openai_api_key="Your_OpenAI_API_Key", temperature=0.5, @@ -74,6 +75,7 @@ Initialize the bot with the `llm`: ```python from apps.discord import Bot + bot = Bot(llm=llm) ``` diff --git a/docs/examples/bingchat.md b/docs/examples/bingchat.md index 5ff93c63..3dabbf2e 100644 --- a/docs/examples/bingchat.md +++ b/docs/examples/bingchat.md @@ -46,6 +46,7 @@ You can also specify the conversation style: ```python from bing_chat import ConversationStyle + response = chat("Tell me a joke", style=ConversationStyle.creative) print(response) ``` diff --git a/docs/examples/flow.md b/docs/examples/flow.md index 92842491..2e635111 100644 --- a/docs/examples/flow.md +++ b/docs/examples/flow.md @@ -107,16 +107,17 @@ Now, let's create your first Agent. 
A Agent represents a chain-like structure th # Import necessary modules ```python -from swarms.models import OpenAIChat # Zephr, Mistral +from swarms.models import OpenAIChat # Zephr, Mistral from swarms.structs import Agent -api_key = ""# Initialize the language model (LLM) -llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)# Initialize the Agent object +api_key = "" # Initialize the language model (LLM) +llm = OpenAIChat( + openai_api_key=api_key, temperature=0.5, max_tokens=3000 +) # Initialize the Agent object -agent = Agent(llm=llm, max_loops=5)# Run the agent +agent = Agent(llm=llm, max_loops=5) # Run the agent out = agent.run("Create an financial analysis on the following metrics") print(out) - ``` ### [3. Initializing the Agent Object](https://github.com/kyegomez/swarms) diff --git a/docs/examples/omni_agent.md b/docs/examples/omni_agent.md index 56a6c996..e4d65498 100644 --- a/docs/examples/omni_agent.md +++ b/docs/examples/omni_agent.md @@ -55,7 +55,7 @@ from swarms.models import OpenAIChat llm = OpenAIChat(openai_api_key="sk-") agent = OmniModalAgent(llm) -response = agent.run("Create an video of a swarm of fish concept art, game art") +response = agent.run("Create an video of a swarm of fish concept art, game art") print(response) ``` diff --git a/docs/examples/revgpt.md b/docs/examples/revgpt.md index 5aa17af4..69107b40 100644 --- a/docs/examples/revgpt.md +++ b/docs/examples/revgpt.md @@ -35,7 +35,6 @@ The abstraction provided in `revgpt.py` is designed to simplify your interaction 1. **Import the Necessary Modules:** ```python -import os from dotenv import load_dotenv from revgpt import AbstractChatGPT ``` diff --git a/docs/examples/stacked_worker.md b/docs/examples/stacked_worker.md index cdcc537c..914d1754 100644 --- a/docs/examples/stacked_worker.md +++ b/docs/examples/stacked_worker.md @@ -28,8 +28,8 @@ The provided code showcases a system built around a worker node that utilizes va The code begins with import statements, bringing in necessary modules and classes. Key imports include the `OpenAIChat` class, which represents a language model, and several custom agents and tools from the `swarms` package. ```python -import os import interpreter # Assuming this is a custom module + from swarms.agents.hf_agents import HFAgent from swarms.agents.omni_modal_agent import OmniModalAgent from swarms.models import OpenAIChat @@ -59,11 +59,7 @@ All defined tools are appended to a list called `tools`. This list is later used ```python # Append tools to a list -tools = [ - hf_agent, - omni_agent, - compile -] +tools = [hf_agent, omni_agent, compile] ``` ### Initializing a Worker Node @@ -263,8 +259,6 @@ response = node.run(task) # Print the response print(response) - - ``` diff --git a/docs/examples/worker.md b/docs/examples/worker.md index 8fe2bf75..cd082aa4 100644 --- a/docs/examples/worker.md +++ b/docs/examples/worker.md @@ -53,11 +53,11 @@ Voila! You’re now ready to summon your Worker. Here’s a simple way to invoke the Worker and give it a task: ```python -from swarms.models import OpenAIChat from swarms import Worker +from swarms.models import OpenAIChat llm = OpenAIChat( - #enter your api key + # enter your api key openai_api_key="", temperature=0.5, ) @@ -75,8 +75,6 @@ node = Worker( task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times." 
response = node.run(task) print(response) - - ``` diff --git a/docs/swarms/agents/abstractagent.md b/docs/swarms/agents/abstractagent.md index cdd06715..8833c164 100644 --- a/docs/swarms/agents/abstractagent.md +++ b/docs/swarms/agents/abstractagent.md @@ -37,7 +37,6 @@ class AbstractAgent: def memory(self, memory_store): """init memory""" - pass def reset(self): """(Abstract method) Reset the agent.""" @@ -82,7 +81,7 @@ agent.reset() The `run` method allows the agent to perform a specific task. ```python -agent.run('some_task') +agent.run("some_task") ``` #### 3. `chat` @@ -90,7 +89,7 @@ agent.run('some_task') The `chat` method enables communication with the agent through a series of messages. ```python -messages = [{'id': 1, 'text': 'Hello, agent!'}, {'id': 2, 'text': 'How are you?'}] +messages = [{"id": 1, "text": "Hello, agent!"}, {"id": 2, "text": "How are you?"}] agent.chat(messages) ``` @@ -99,7 +98,7 @@ agent.chat(messages) The `step` method allows the agent to process a single message. ```python -agent.step('Hello, agent!') +agent.step("Hello, agent!") ``` ### Asynchronous Methods diff --git a/docs/swarms/agents/message.md b/docs/swarms/agents/message.md index 87794ebc..413ac016 100644 --- a/docs/swarms/agents/message.md +++ b/docs/swarms/agents/message.md @@ -44,7 +44,7 @@ class Message: def __repr__(self): """ __repr__ represents the string representation of the Message object. - + Returns: (str) A string containing the timestamp, sender, and content of the message. """ @@ -60,10 +60,7 @@ The `Message` class represents a message in the agent system. Upon initializatio Creating a `Message` object and displaying its string representation. ```python -mes = Message( - sender = "Kye", - content = "Hello! How are you?" -) +mes = Message(sender="Kye", content="Hello! How are you?") print(mes) ``` @@ -80,9 +77,7 @@ Creating a `Message` object with metadata. ```python metadata = {"priority": "high", "category": "urgent"} mes_with_metadata = Message( - sender = "Alice", - content = "Important update", - metadata = metadata + sender="Alice", content="Important update", metadata=metadata ) print(mes_with_metadata) @@ -98,10 +93,7 @@ Output: Creating a `Message` object without providing metadata. 
```python -mes_no_metadata = Message( - sender = "Bob", - content = "Reminder: Meeting at 2PM" -) +mes_no_metadata = Message(sender="Bob", content="Reminder: Meeting at 2PM") print(mes_no_metadata) ``` diff --git a/docs/swarms/agents/omni_agent.md b/docs/swarms/agents/omni_agent.md index a80da0e6..888e824f 100644 --- a/docs/swarms/agents/omni_agent.md +++ b/docs/swarms/agents/omni_agent.md @@ -39,10 +39,12 @@ For streaming mode, this function yields the response token by token, ensuring a ## Examples & Use Cases Initialize the `OmniModalAgent` and communicate with it: ```python +import os + +from dotenv import load_dotenv + from swarms.agents.omni_modal_agent import OmniModalAgent, OpenAIChat from swarms.models import OpenAIChat -from dotenv import load_dotenv -import os # Load the environment variables load_dotenv() diff --git a/docs/swarms/agents/toolagent.md b/docs/swarms/agents/toolagent.md index ebb00623..35a31f99 100644 --- a/docs/swarms/agents/toolagent.md +++ b/docs/swarms/agents/toolagent.md @@ -69,6 +69,7 @@ The `ToolAgent` class takes the following arguments: ```python from transformers import AutoModelForCausalLM, AutoTokenizer + from swarms import ToolAgent # Creating a model and tokenizer @@ -82,11 +83,8 @@ json_schema = { "name": {"type": "string"}, "age": {"type": "number"}, "is_student": {"type": "boolean"}, - "courses": { - "type": "array", - "items": {"type": "string"} - } - } + "courses": {"type": "array", "items": {"type": "string"}}, + }, } # Defining a task diff --git a/docs/swarms/agents/workeragent.md b/docs/swarms/agents/workeragent.md index e46ec1af..aaa71653 100644 --- a/docs/swarms/agents/workeragent.md +++ b/docs/swarms/agents/workeragent.md @@ -38,7 +38,7 @@ worker = Worker( human_in_the_loop=False, temperature=0.5, llm=some_language_model, - openai_api_key="my_key" + openai_api_key="my_key", ) worker.run("What's the weather in Miami?") ``` @@ -56,11 +56,11 @@ worker.send() ```python external_tools = [MyTool1(), MyTool2()] worker = Worker( -name="My Worker", -role="Worker", -external_tools=external_tools, -human_in_the_loop=False, -temperature=0.5, + name="My Worker", + role="Worker", + external_tools=external_tools, + human_in_the_loop=False, + temperature=0.5, ) ``` diff --git a/docs/swarms/chunkers/basechunker.md b/docs/swarms/chunkers/basechunker.md index 33b03312..11c32dc5 100644 --- a/docs/swarms/chunkers/basechunker.md +++ b/docs/swarms/chunkers/basechunker.md @@ -69,7 +69,9 @@ from basechunker import BaseChunker, ChunkSeparator chunker = BaseChunker() # Text to be chunked -input_text = "This is a long text that needs to be split into smaller chunks for processing." +input_text = ( + "This is a long text that needs to be split into smaller chunks for processing." 
+) # Chunk the text chunks = chunker.chunk(input_text) diff --git a/docs/swarms/chunkers/pdf_chunker.md b/docs/swarms/chunkers/pdf_chunker.md index 8c92060d..d3eb02a9 100644 --- a/docs/swarms/chunkers/pdf_chunker.md +++ b/docs/swarms/chunkers/pdf_chunker.md @@ -62,8 +62,8 @@ Let's explore how to use the `PdfChunker` class with different scenarios and app #### Example 1: Basic Chunking ```python -from swarms.chunkers.pdf_chunker import PdfChunker from swarms.chunkers.chunk_seperator import ChunkSeparator +from swarms.chunkers.pdf_chunker import PdfChunker # Initialize the PdfChunker pdf_chunker = PdfChunker() @@ -82,8 +82,8 @@ for idx, chunk in enumerate(chunks, start=1): #### Example 2: Custom Separators ```python -from swarms.chunkers.pdf_chunker import PdfChunker from swarms.chunkers.chunk_seperator import ChunkSeparator +from swarms.chunkers.pdf_chunker import PdfChunker # Define custom separators for PDF chunking custom_separators = [ChunkSeparator("\n\n"), ChunkSeparator(". ")] diff --git a/docs/swarms/index.md b/docs/swarms/index.md index 98c6b7a3..b052b6ce 100644 --- a/docs/swarms/index.md +++ b/docs/swarms/index.md @@ -28,7 +28,6 @@ We have a small gallery of examples to run here, [for more check out the docs to - Enterprise Grade + Production Grade: `Agent` is designed and optimized for automating real-world tasks at scale! ```python - from swarms.models import OpenAIChat from swarms.structs import Agent @@ -64,9 +63,6 @@ out = agent.run("Generate a 10,000 word blog on health and wellness.") # out = agent.print_history_and_memory() # # out = agent.save_state("flow_state.json") # print(out) - - - ``` ------ @@ -82,9 +78,7 @@ from swarms.structs import Agent from swarms.structs.sequential_workflow import SequentialWorkflow # Example usage -api_key = ( - "" # Your actual API key here -) +api_key = "" # Your actual API key here # Initialize the language agent llm = OpenAIChat( @@ -118,7 +112,6 @@ workflow.run() # Output the results for task in workflow.tasks: print(f"Task: {task.description}, Result: {task.result}") - ``` --- diff --git a/docs/swarms/memory/pg.md b/docs/swarms/memory/pg.md index 84878a76..3695e11c 100644 --- a/docs/swarms/memory/pg.md +++ b/docs/swarms/memory/pg.md @@ -110,7 +110,9 @@ def setup( ```python # Initialize the PgVectorVectorStore instance -vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name") +vector_store = PgVectorVectorStore( + connection_string="your-db-connection-string", table_name="your-table-name" +) # Set up the database with default settings vector_store.setup() @@ -120,10 +122,14 @@ vector_store.setup() ```python # Initialize the PgVectorVectorStore instance -vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name") +vector_store = PgVectorVectorStore( + connection_string="your-db-connection-string", table_name="your-table-name" +) # Set up the database with customized settings -vector_store.setup(create_schema=False, install_uuid_extension=True, install_vector_extension=True) +vector_store.setup( + create_schema=False, install_uuid_extension=True, install_vector_extension=True +) ``` ### 4.2 Upserting Vectors @@ -137,7 +143,7 @@ def upsert_vector( vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> str: """ Inserts or updates a vector in the collection. 
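The hunk above only reformats the `upsert_vector` signature, so a minimal sketch of the insert-versus-update behavior that signature implies may help here. The constructor arguments mirror the surrounding examples; the placeholder DSN, table name, and the assumption that omitting `vector_id` auto-generates an id are illustrative, not confirmed by the source:

```python
# Hypothetical walkthrough of upsert_vector(), based on the signature above
vector_store = PgVectorVectorStore(
    connection_string="postgresql://user:pass@localhost/db",  # placeholder DSN
    table_name="embeddings",  # placeholder table name
)

# First call inserts: no vector_id is supplied, so one is presumably generated
new_id = vector_store.upsert_vector(vector=[0.1, 0.2, 0.3, 0.4])

# Second call updates: reusing the returned id overwrites the stored vector
vector_store.upsert_vector(
    vector=[0.4, 0.3, 0.2, 0.1],
    vector_id=new_id,
    namespace="demo-namespace",
    meta={"source": "example"},
)
```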
@@ -158,7 +164,9 @@ def upsert_vector( ```python # Initialize the PgVectorVectorStore instance -vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name") +vector_store = PgVectorVectorStore( + connection_string="your-db-connection-string", table_name="your-table-name" +) # Define a vector and upsert it vector = [0.1, 0.2, 0.3, 0.4] @@ -167,10 +175,7 @@ namespace = "your-namespace" meta = {"key1": "value1", "key2": "value2"} vector_store.upsert_vector( - vector=vector, - vector_id=vector_id, - namespace=namespace, - meta=meta + vector=vector, vector_id=vector_id, namespace=namespace, meta=meta ) ``` @@ -222,9 +227,7 @@ else: The `load_entries` method allows you to load all vector entries from the collection, optionally filtering by namespace. ```python -def load_entries( - self, namespace: Optional[str] = None -) -> list[BaseVectorStore.Entry]: +def load_entries(self, namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]: """ Retrieves all vector entries from the collection, optionally filtering to only those that match the provided namespace. @@ -240,7 +243,9 @@ def load_entries( ```python # Initialize the PgVectorVectorStore instance -vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name") +vector_store = PgVectorVectorStore( + connection_string="your-db-connection-string", table_name="your-table-name" +) # Load all vector entries in the specified namespace entries = vector_store.load_entries(namespace="your-namespace") @@ -266,7 +271,7 @@ def query( namespace: Optional[str] = None, include_vectors: bool = False, distance_metric: str = "cosine_distance", - **kwargs + **kwargs, ) -> list[BaseVectorStore.QueryResult]: """ Performs a search on the collection to find vectors similar to the provided input vector, @@ -290,7 +295,9 @@ def query( ```python # Initialize the PgVectorVectorStore instance -vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name") +vector_store = PgVectorVectorStore( + connection_string="your-db-connection-string", table_name="your-table-name" +) # Perform a vector query query_string = "your-query-string" @@ -304,7 +311,7 @@ results = vector_store.query( count=count, namespace=namespace, include_vectors=include_vectors, - distance_metric=distance_metric + distance_metric=distance_metric, ) # Process the query results diff --git a/docs/swarms/memory/pinecone.md b/docs/swarms/memory/pinecone.md index 830d10fe..f8ca0f2e 100644 --- a/docs/swarms/memory/pinecone.md +++ b/docs/swarms/memory/pinecone.md @@ -174,7 +174,7 @@ pv = PineconeVector( api_key="your-api-key", index_name="your-index-name", environment="us-west1-gcp", - project_name="your-project-name" + project_name="your-project-name", ) ``` @@ -198,12 +198,7 @@ vector_id = "unique-vector-id" namespace = "your-namespace" meta = {"key1": "value1", "key2": "value2"} -pv.upsert_vector( - vector=vector, - vector_id=vector_id, - namespace=namespace, - meta=meta -) +pv.upsert_vector(vector=vector, vector_id=vector_id, namespace=namespace, meta=meta) ``` ### 4.4 Querying the Index @@ -222,7 +217,7 @@ results = pv.query( count=count, namespace=namespace, include_vectors=include_vectors, - include_metadata=include_metadata + include_metadata=include_metadata, ) # Process the query results diff --git a/docs/swarms/memory/qdrant.md b/docs/swarms/memory/qdrant.md index 3717d94f..e234e7be 100644 --- a/docs/swarms/memory/qdrant.md +++ 
b/docs/swarms/memory/qdrant.md @@ -14,8 +14,15 @@ pip install qdrant-client sentence-transformers httpx ```python class Qdrant: - def __init__(self, api_key: str, host: str, port: int = 6333, collection_name: str = "qdrant", model_name: str = "BAAI/bge-small-en-v1.5", https: bool = True): - ... + def __init__( + self, + api_key: str, + host: str, + port: int = 6333, + collection_name: str = "qdrant", + model_name: str = "BAAI/bge-small-en-v1.5", + https: bool = True, + ): ... ``` ### Constructor Parameters @@ -60,10 +67,7 @@ qdrant_client = Qdrant(api_key="your_api_key", host="localhost", port=6333) ### Example 2: Adding Vectors to a Collection ```python -documents = [ - {"page_content": "Sample text 1"}, - {"page_content": "Sample text 2"} -] +documents = [{"page_content": "Sample text 1"}, {"page_content": "Sample text 2"}] operation_info = qdrant_client.add_vectors(documents) print(operation_info) diff --git a/docs/swarms/memory/short_term_memory.md b/docs/swarms/memory/short_term_memory.md index 2aabbd5c..9ee3a738 100644 --- a/docs/swarms/memory/short_term_memory.md +++ b/docs/swarms/memory/short_term_memory.md @@ -125,7 +125,9 @@ def update_short_term(self, index, role: str, message: str, *args, **kwargs): ##### Example: Updating a Message in Short-Term Memory ```python -memory.update_short_term(index=0, role="Updated Role", message="Updated message content.") +memory.update_short_term( + index=0, role="Updated Role", message="Updated message content." +) ``` #### 7. `clear` diff --git a/docs/swarms/memory/weaviate.md b/docs/swarms/memory/weaviate.md index b23baedf..dc264653 100644 --- a/docs/swarms/memory/weaviate.md +++ b/docs/swarms/memory/weaviate.md @@ -82,7 +82,7 @@ weaviate_client.create_collection( {"name": "property1", "dataType": ["string"]}, {"name": "property2", "dataType": ["int"]}, ], - vectorizer_config=None # Optional vectorizer configuration + vectorizer_config=None, # Optional vectorizer configuration ) ``` @@ -99,8 +99,7 @@ The `add` method allows you to add an object to a specified collection in Weavia ```python weaviate_client.add( - collection_name="my_collection", - properties={"property1": "value1", "property2": 42} + collection_name="my_collection", properties={"property1": "value1", "property2": 42} ) ``` @@ -142,7 +141,7 @@ The `update` method allows you to update an object in a specified collection in weaviate_client.update( collection_name="my_collection", object_id="object123", - properties={"property1": "new_value", "property2": 99} + properties={"property1": "new_value", "property2": 99}, ) ``` @@ -158,10 +157,7 @@ The `delete` method allows you to delete an object from a specified collection i #### Usage ```python -weaviate_client.delete( - collection_name="my_collection", - object_id="object123" -) +weaviate_client.delete(collection_name="my_collection", object_id="object123") ``` ## Examples @@ -175,28 +171,21 @@ weaviate_client.create_collection( name="people", properties=[ {"name": "name", "dataType": ["string"]}, - {"name": "age", "dataType": ["int"]} - ] + {"name": "age", "dataType": ["int"]}, + ], ) ``` ### Example 2: Adding an Object ```python -weaviate_client.add( - collection_name="people", - properties={"name": "John", "age": 30} -) +weaviate_client.add(collection_name="people", properties={"name": "John", "age": 30}) ``` ### Example 3: Querying Objects ```python -results = weaviate_client.query( - collection_name="people", - query="name:John", - limit=5 -) +results = weaviate_client.query(collection_name="people", query="name:John", 
limit=5) ``` These examples cover the basic operations of creating collections, adding objects, and querying objects using the Weaviate API Client. diff --git a/docs/swarms/models/anthropic.md b/docs/swarms/models/anthropic.md index 85e7a428..438adfbe 100644 --- a/docs/swarms/models/anthropic.md +++ b/docs/swarms/models/anthropic.md @@ -72,9 +72,7 @@ class Anthropic: from swarms.models import Anthropic # Initialize an instance of the Anthropic class -model = Anthropic( - anthropic_api_key="" -) +model = Anthropic(anthropic_api_key="") # Using the run method completion_1 = model.run("What is the capital of France?") diff --git a/docs/swarms/models/base_multimodal_model.md b/docs/swarms/models/base_multimodal_model.md index 13efa11c..c1a8373d 100644 --- a/docs/swarms/models/base_multimodal_model.md +++ b/docs/swarms/models/base_multimodal_model.md @@ -149,7 +149,9 @@ model = BaseMultiModalModel( ) # Run the model with a text task and an image URL -response = model.run("Generate a summary of this text", "https://www.example.com/image.jpg") +response = model.run( + "Generate a summary of this text", "https://www.example.com/image.jpg" +) print(response) ``` @@ -209,6 +211,7 @@ for response in responses: ```python from swarms.models import BaseMultiModalModel + class CustomMultiModalModel(BaseMultiModalModel): def __init__(self, model_name, custom_parameter, *args, **kwargs): # Call the parent class constructor @@ -226,6 +229,7 @@ class CustomMultiModalModel(BaseMultiModalModel): # You can use self.custom_parameter and other inherited attributes pass + # Create an instance of your custom multimodal model custom_model = CustomMultiModalModel( model_name="your_custom_model_name", @@ -236,7 +240,9 @@ custom_model = CustomMultiModalModel( ) # Run your custom model -response = custom_model.run("Generate a summary of this text", "https://www.example.com/image.jpg") +response = custom_model.run( + "Generate a summary of this text", "https://www.example.com/image.jpg" +) print(response) # Generate a summary using your custom model diff --git a/docs/swarms/models/bingchat.md b/docs/swarms/models/bingchat.md index 70c184d3..a98aa8be 100644 --- a/docs/swarms/models/bingchat.md +++ b/docs/swarms/models/bingchat.md @@ -39,7 +39,6 @@ print(response) ```python from swarms.models.bing_chat import BingChat - edgegpt = BingChat(cookies_path="./path/to/cookies.json") response = edgegpt("Hello, my name is ChatGPT") print(response) @@ -48,7 +47,9 @@ print(response) 3. 
Generate an image based on a text prompt: ```python -image_path = edgegpt.create_img("Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie") +image_path = edgegpt.create_img( + "Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie" +) print(f"Generated image saved at {image_path}") ``` @@ -59,7 +60,9 @@ from swarms.models.bing_chat import BingChat edgegpt = BingChat(cookies_path="./path/to/cookies.json") -image_path = edgegpt.create_img("Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie") +image_path = edgegpt.create_img( + "Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie" +) print(f"Generated image saved at {image_path}") ``` diff --git a/docs/swarms/models/biogpt.md b/docs/swarms/models/biogpt.md index c43557b6..bb4e8914 100644 --- a/docs/swarms/models/biogpt.md +++ b/docs/swarms/models/biogpt.md @@ -83,7 +83,6 @@ print(generated_text) ```python from swarms.models import BioGPT - # Initialize the BioGPT model biogpt = BioGPT() @@ -99,7 +98,6 @@ print(features) ```python from swarms.models import BioGPT - # Initialize the BioGPT model biogpt = BioGPT() diff --git a/docs/swarms/models/distilled_whisperx.md b/docs/swarms/models/distilled_whisperx.md index e9339c1e..79c8c2ea 100644 --- a/docs/swarms/models/distilled_whisperx.md +++ b/docs/swarms/models/distilled_whisperx.md @@ -29,7 +29,7 @@ from swarms.models import DistilWhisperModel model_wrapper = DistilWhisperModel() # Initialize with a specific model ID -model_wrapper = DistilWhisperModel(model_id='distil-whisper/distil-large-v2') +model_wrapper = DistilWhisperModel(model_id="distil-whisper/distil-large-v2") ``` ## Attributes @@ -62,7 +62,7 @@ Transcribes audio input synchronously. ```python # Synchronous transcription -transcription = model_wrapper.transcribe('path/to/audio.mp3') +transcription = model_wrapper.transcribe("path/to/audio.mp3") print(transcription) ``` @@ -84,7 +84,7 @@ Transcribes audio input asynchronously. import asyncio # Asynchronous transcription -transcription = asyncio.run(model_wrapper.async_transcribe('path/to/audio.mp3')) +transcription = asyncio.run(model_wrapper.async_transcribe("path/to/audio.mp3")) print(transcription) ``` @@ -103,7 +103,7 @@ Simulates real-time transcription of an audio file. ```python # Real-time transcription simulation -model_wrapper.real_time_transcribe('path/to/audio.mp3', chunk_duration=5) +model_wrapper.real_time_transcribe("path/to/audio.mp3", chunk_duration=5) ``` ## Error Handling diff --git a/docs/swarms/models/gpt4v.md b/docs/swarms/models/gpt4v.md index 69968623..5ad80cd9 100644 --- a/docs/swarms/models/gpt4v.md +++ b/docs/swarms/models/gpt4v.md @@ -107,16 +107,16 @@ The `__call__` method is a convenient way to run the GPT-4 Vision model. It has ```python def __call__(task: str, img: str) -> str: """ - Run the GPT-4 Vision model (callable). + Run the GPT-4 Vision model (callable). - Parameters: - - task (str): The task or question related to the image. - - img + Parameters: + - task (str): The task or question related to the image. + - img - (str): URL of the image to analyze. + (str): URL of the image to analyze. - Returns: - str: The model's response. + Returns: + str: The model's response. 
""" ``` diff --git a/docs/swarms/models/huggingface.md b/docs/swarms/models/huggingface.md index 8606d8f2..50aaa2a1 100644 --- a/docs/swarms/models/huggingface.md +++ b/docs/swarms/models/huggingface.md @@ -114,9 +114,11 @@ from swarms.models import HuggingfaceLLM custom_config = { "quantize": True, "quantization_config": {"load_in_4bit": True}, - "verbose": True + "verbose": True, } -inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config) +inference = HuggingfaceLLM( + model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config +) # Generate text based on a prompt prompt_text = "Tell me a joke" diff --git a/docs/swarms/models/idefics.md b/docs/swarms/models/idefics.md index 04822fdd..57125038 100644 --- a/docs/swarms/models/idefics.md +++ b/docs/swarms/models/idefics.md @@ -36,7 +36,9 @@ model = Idefics() 2. Generate text based on prompts: ```python -prompts = ["User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"] +prompts = [ + "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" +] response = model(prompts) print(response) ``` @@ -47,7 +49,9 @@ print(response) from swarms.models import Idefics model = Idefics() -prompts = ["User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"] +prompts = [ + "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" +] response = model(prompts) print(response) ``` diff --git a/docs/swarms/models/index.md b/docs/swarms/models/index.md index 93883779..9e001eea 100644 --- a/docs/swarms/models/index.md +++ b/docs/swarms/models/index.md @@ -42,9 +42,10 @@ OpenAI(api_key: str, system: str = None, console: bool = True, model: str = None **Usage Example:** ```python -from swarms import OpenAI import asyncio +from swarms import OpenAI + chat = OpenAI(api_key="YOUR_OPENAI_API_KEY") response = chat.generate("Hello, how can I assist you?") @@ -126,7 +127,10 @@ GooglePalm(model_name: str = "models/chat-bison-001", google_api_key: str = None from swarms import GooglePalm google_palm = GooglePalm() -messages = [{"role": "system", "content": "You are a helpful assistant"}, {"role": "user", "content": "Tell me a joke"}] +messages = [ + {"role": "system", "content": "You are a helpful assistant"}, + {"role": "user", "content": "Tell me a joke"}, +] response = google_palm.generate(messages) print(response["choices"][0]["text"]) diff --git a/docs/swarms/models/kosmos.md b/docs/swarms/models/kosmos.md index 1735e153..a19ea791 100644 --- a/docs/swarms/models/kosmos.md +++ b/docs/swarms/models/kosmos.md @@ -30,7 +30,9 @@ kosmos = Kosmos() 2. Perform Multimodal Grounding: ```python -kosmos.multimodal_grounding("Find the red apple in the image.", "https://example.com/apple.jpg") +kosmos.multimodal_grounding( + "Find the red apple in the image.", "https://example.com/apple.jpg" +) ``` ### Example 1 - Multimodal Grounding @@ -40,13 +42,17 @@ from swarms.models.kosmos_two import Kosmos kosmos = Kosmos() -kosmos.multimodal_grounding("Find the red apple in the image.", "https://example.com/apple.jpg") +kosmos.multimodal_grounding( + "Find the red apple in the image.", "https://example.com/apple.jpg" +) ``` 3. 
Perform Referring Expression Comprehension: ```python -kosmos.referring_expression_comprehension("Show me the green bottle.", "https://example.com/bottle.jpg") +kosmos.referring_expression_comprehension( + "Show me the green bottle.", "https://example.com/bottle.jpg" +) ``` ### Example 2 - Referring Expression Comprehension @@ -56,13 +62,17 @@ from swarms.models.kosmos_two import Kosmos kosmos = Kosmos() -kosmos.referring_expression_comprehension("Show me the green bottle.", "https://example.com/bottle.jpg") +kosmos.referring_expression_comprehension( + "Show me the green bottle.", "https://example.com/bottle.jpg" +) ``` 4. Generate Referring Expressions: ```python -kosmos.referring_expression_generation("It is on the table.", "https://example.com/table.jpg") +kosmos.referring_expression_generation( + "It is on the table.", "https://example.com/table.jpg" +) ``` ### Example 3 - Referring Expression Generation @@ -72,7 +82,9 @@ from swarms.models.kosmos_two import Kosmos kosmos = Kosmos() -kosmos.referring_expression_generation("It is on the table.", "https://example.com/table.jpg") +kosmos.referring_expression_generation( + "It is on the table.", "https://example.com/table.jpg" +) ``` 5. Perform Grounded Visual Question Answering (VQA): @@ -127,7 +139,10 @@ kosmos.grounded_image_captioning_detailed("https://example.com/beach.jpg") ```python image = kosmos.get_image("https://example.com/image.jpg") -entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])] +entities = [ + ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), + ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]), +] kosmos.draw_entity_boxes_on_image(image, entities, show=True) ``` @@ -139,24 +154,38 @@ from swarms.models.kosmos_two import Kosmos kosmos = Kosmos() image = kosmos.get_image("https://example.com/image.jpg") -entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])] +entities = [ + ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), + ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]), +] kosmos.draw_entity_boxes_on_image(image, entities, show=True) ``` 9. 
Generate Boxes for Entities: ```python -entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])] -image = kosmos.generate_boxes("Find the apple and the banana in the image.", "https://example.com/image.jpg") +entities = [ + ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), + ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]), +] +image = kosmos.generate_boxes( + "Find the apple and the banana in the image.", "https://example.com/image.jpg" +) ``` ### Example 8 - Generating Boxes for Entities ```python from swarms.models.kosmos_two import Kosmos + kosmos = Kosmos() -entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])] -image = kosmos.generate_boxes("Find the apple and the banana in the image.", "https://example.com/image.jpg") +entities = [ + ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), + ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]), +] +image = kosmos.generate_boxes( + "Find the apple and the banana in the image.", "https://example.com/image.jpg" +) ``` ## How Kosmos Works diff --git a/docs/swarms/models/mistral.md b/docs/swarms/models/mistral.md index c8dc179c..6c38cd97 100644 --- a/docs/swarms/models/mistral.md +++ b/docs/swarms/models/mistral.md @@ -150,7 +150,6 @@ Example: ```python from swarms.models import Mistral - model = Mistral() task = "Translate the following English text to French: 'Hello, how are you?'" result = model.run(task) diff --git a/docs/swarms/models/mpt.md b/docs/swarms/models/mpt.md index 41f3ec74..0592284b 100644 --- a/docs/swarms/models/mpt.md +++ b/docs/swarms/models/mpt.md @@ -52,10 +52,10 @@ class MPT7B: from swarms.models import MPT7B # Initialize the MPT7B class -mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150) +mpt = MPT7B("mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150) # Generate text -output = mpt.run('generate', 'Once upon a time in a land far, far away...') +output = mpt.run("generate", "Once upon a time in a land far, far away...") print(output) ``` @@ -77,13 +77,16 @@ print(outputs) ```python import asyncio + from swarms.models import MPT7B # Initialize the MPT7B class -mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150) +mpt = MPT7B("mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150) # Generate text asynchronously -output = asyncio.run(mpt.run_async('generate', 'Once upon a time in a land far, far away...')) +output = asyncio.run( + mpt.run_async("generate", "Once upon a time in a land far, far away...") +) print(output) ``` diff --git a/docs/swarms/models/openai.md b/docs/swarms/models/openai.md index f173619d..ae547631 100644 --- a/docs/swarms/models/openai.md +++ b/docs/swarms/models/openai.md @@ -168,7 +168,11 @@ prompt = "Translate the following English text to French: 'Hello, how are you?'" generated_text = openai.generate(prompt, max_tokens=50) # Generate text from multiple prompts -prompts = ["Translate this: 'Good morning' to Spanish.", "Summarize the following article:", article_text] +prompts = [ + "Translate this: 'Good morning' to Spanish.", + "Summarize the following article:", + article_text, +] generated_texts = openai.generate(prompts, max_tokens=100) # Generate text asynchronously @@ -188,7 +192,7 @@ custom_options = { "max_tokens": 100, "top_p": 0.9, "frequency_penalty": 0.2, - "presence_penalty": 0.4 + "presence_penalty": 0.4, } generated_text = openai.generate(prompt, **custom_options) ``` diff --git a/docs/swarms/models/openai_chat.md 
b/docs/swarms/models/openai_chat.md index a2ef9811..d7d9b2eb 100644 --- a/docs/swarms/models/openai_chat.md +++ b/docs/swarms/models/openai_chat.md @@ -150,7 +150,9 @@ user_message = "User: Tell me another joke." response = openai_chat.generate([user_message]) # Print the generated response -print(response[0][0].text) # Output: "Assistant: Why don't scientists trust atoms? Because they make up everything!" +print( + response[0][0].text +) # Output: "Assistant: Why don't scientists trust atoms? Because they make up everything!" ``` ### Example 3: Asynchronous Generation @@ -158,12 +160,14 @@ print(response[0][0].text) # Output: "Assistant: Why don't scientists trust ato ```python import asyncio + # Define an asynchronous function for generating responses async def generate_responses(): user_message = "User: Tell me a fun fact." async for chunk in openai_chat.stream([user_message]): print(chunk.text) + # Run the asynchronous generation function asyncio.run(generate_responses()) ``` diff --git a/docs/swarms/models/vilt.md b/docs/swarms/models/vilt.md index e2c6f325..2cb56b22 100644 --- a/docs/swarms/models/vilt.md +++ b/docs/swarms/models/vilt.md @@ -26,20 +26,26 @@ To use the Vilt model, follow these steps: ```python from swarms.models import Vilt + model = Vilt() ``` 2. Call the model with a text question and an image URL: ```python -output = model("What is this image?", "http://images.cocodataset.org/val2017/000000039769.jpg") +output = model( + "What is this image?", "http://images.cocodataset.org/val2017/000000039769.jpg" +) ``` ### Example 1 - Image Questioning ```python model = Vilt() -output = model("What are the objects in this image?", "http://images.cocodataset.org/val2017/000000039769.jpg") +output = model( + "What are the objects in this image?", + "http://images.cocodataset.org/val2017/000000039769.jpg", +) print(output) ``` @@ -47,7 +53,10 @@ print(output) ```python model = Vilt() -output = model("Describe the scene in this image.", "http://images.cocodataset.org/val2017/000000039769.jpg") +output = model( + "Describe the scene in this image.", + "http://images.cocodataset.org/val2017/000000039769.jpg", +) print(output) ``` @@ -55,7 +64,10 @@ print(output) ```python model = Vilt() -output = model("Tell me more about the landmark in this image.", "http://images.cocodataset.org/val2017/000000039769.jpg") +output = model( + "Tell me more about the landmark in this image.", + "http://images.cocodataset.org/val2017/000000039769.jpg", +) print(output) ``` diff --git a/docs/swarms/models/vllm.md b/docs/swarms/models/vllm.md index 06a7c8f8..778c1b2b 100644 --- a/docs/swarms/models/vllm.md +++ b/docs/swarms/models/vllm.md @@ -63,7 +63,7 @@ custom_vllm = vLLM( trust_remote_code=True, revision="abc123", temperature=0.7, - top_p=0.8 + top_p=0.8, ) ``` @@ -108,7 +108,7 @@ custom_vllm = vLLM( trust_remote_code=True, revision="abc123", temperature=0.7, - top_p=0.8 + top_p=0.8, ) # Generate text with custom configuration @@ -128,7 +128,7 @@ vllm = vLLM() tasks = [ "Translate the following sentence to French: 'Hello, world!'", "Write a short story set in a futuristic world.", - "Summarize the main points of a news article about climate change." 
+ "Summarize the main points of a news article about climate change.", ] for task in tasks: diff --git a/docs/swarms/models/zephyr.md b/docs/swarms/models/zephyr.md index d9522711..76782ead 100644 --- a/docs/swarms/models/zephyr.md +++ b/docs/swarms/models/zephyr.md @@ -45,6 +45,7 @@ To use the Zephyr model, follow these steps: ```python from swarms.models import Zephyr + model = Zephyr(max_new_tokens=300, temperature=0.7, top_k=50, top_p=0.95) ``` diff --git a/docs/swarms/structs/abstractswarm.md b/docs/swarms/structs/abstractswarm.md index 78e28493..6bdf736b 100644 --- a/docs/swarms/structs/abstractswarm.md +++ b/docs/swarms/structs/abstractswarm.md @@ -47,9 +47,11 @@ The `AbstractSwarm` class is an abstract base class that serves as the foundatio ```python from abc import ABC, abstractmethod -from typing import Optional, List, Dict, Any +from typing import List + from swarms.swarms.base import AbstractWorker + class AbstractSwarm(ABC): """ Abstract class for swarm simulation architectures @@ -58,12 +60,12 @@ class AbstractSwarm(ABC): --------- ... """ + # The class definition and constructor are provided here. @abstractmethod def __init__(self, workers: List["AbstractWorker"]): """Initialize the swarm with workers""" - pass # Other abstract methods are listed here. ``` diff --git a/docs/swarms/structs/agent.md b/docs/swarms/structs/agent.md index f04bba02..8408b0b8 100644 --- a/docs/swarms/structs/agent.md +++ b/docs/swarms/structs/agent.md @@ -68,7 +68,9 @@ final_response = agent.run(initial_task) You can collect feedback during the conversation using the `provide_feedback` method: ```python -agent.provide_feedback("Generate an SOP for new sales employees on the best cold sales practices") +agent.provide_feedback( + "Generate an SOP for new sales employees on the best cold sales practices" +) ``` ### Stopping Condition @@ -78,9 +80,11 @@ You can define a custom stopping condition using a function. 
For example, you ca ```python from swarms.structs import Agent + def stop_when_repeats(response: str) -> bool: return "Stop" in response.lower() + agent = Agent(llm=my_language_model, max_loops=5, stopping_condition=stop_when_repeats) ``` @@ -107,9 +111,9 @@ Here are three usage examples: ### Example 1: Simple Conversation ```python -from swarms.structs import Agent # Select any Language model from the models folder from swarms.models import Mistral, OpenAIChat +from swarms.structs import Agent llm = Mistral() # llm = OpenAIChat() @@ -128,9 +132,11 @@ final_response = agent.run(initial_task) ```python from swarms.structs import Agent + def stop_when_repeats(response: str) -> bool: return "Stop" in response.lower() + agent = Agent(llm=llm, max_loops=5, stopping_condition=stop_when_repeats) ``` diff --git a/docs/swarms/structs/artifact.md b/docs/swarms/structs/artifact.md index 52444d9f..9e00f083 100644 --- a/docs/swarms/structs/artifact.md +++ b/docs/swarms/structs/artifact.md @@ -41,9 +41,7 @@ class Artifact(BaseModel): ) relative_path: Optional[str] = Field( None, - description=( - "Relative path of the artifact in the agent's workspace" - ), + description=("Relative path of the artifact in the agent's workspace"), example="python/code/", ) ``` @@ -64,7 +62,7 @@ from swarms.structs import Artifact artifact_instance = Artifact( artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56", file_name="main.py", - relative_path="python/code/" + relative_path="python/code/", ) ``` @@ -85,8 +83,7 @@ If the `relative_path` attribute is not provided during artifact creation, it wi ```python artifact_instance_no_path = Artifact( - artifact_id="c280s347-9b7d-3c68-m337-7abvf50j23k", - file_name="script.js" + artifact_id="c280s347-9b7d-3c68-m337-7abvf50j23k", file_name="script.js" ) print(artifact_instance_no_path.relative_path) diff --git a/docs/swarms/structs/artifactupload.md b/docs/swarms/structs/artifactupload.md index 533f7d53..90b30f58 100644 --- a/docs/swarms/structs/artifactupload.md +++ b/docs/swarms/structs/artifactupload.md @@ -15,9 +15,7 @@ class ArtifactUpload(BaseModel): file: bytes = Field(..., description="File to upload") relative_path: Optional[str] = Field( None, - description=( - "Relative path of the artifact in the agent's workspace" - ), + description=("Relative path of the artifact in the agent's workspace"), example="python/code/", ) ``` @@ -32,10 +30,12 @@ The `ArtifactUpload` class is used to create an instance of an artifact upload. from swarms.structs import ArtifactUpload # Uploading a file with no relative path -upload_no_path = ArtifactUpload(file=b'example_file_contents') +upload_no_path = ArtifactUpload(file=b"example_file_contents") # Uploading a file with a relative path -upload_with_path = ArtifactUpload(file=b'example_file_contents', relative_path="python/code/") +upload_with_path = ArtifactUpload( + file=b"example_file_contents", relative_path="python/code/" +) ``` In the above example, `upload_no_path` is an instance of `ArtifactUpload` with no specified `relative_path`, whereas `upload_with_path` is an instance of `ArtifactUpload` with the `relative_path` set to "python/code/". 
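Before the next file, a quick round-trip check may make the `ArtifactUpload` model above more concrete. This is a minimal sketch assuming pydantic v1 (which the `Field(..., description=...)` style in the hunk suggests); the stand-in class simply mirrors the two fields shown in the source:

```python
from typing import Optional

from pydantic import BaseModel, Field  # pydantic v1 API assumed


# Stand-in mirroring the ArtifactUpload model shown in the diff above
class ArtifactUpload(BaseModel):
    file: bytes = Field(..., description="File to upload")
    relative_path: Optional[str] = Field(
        None, description="Relative path of the artifact in the agent's workspace"
    )


upload = ArtifactUpload(file=b"print('hello')", relative_path="python/code/")

# Serialize and rebuild to confirm the model round-trips cleanly
payload = upload.dict()
assert ArtifactUpload(**payload) == upload
print(payload)  # {'file': b"print('hello')", 'relative_path': 'python/code/'}
```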
diff --git a/docs/swarms/structs/autoscaler.md b/docs/swarms/structs/autoscaler.md index 703ae860..6d07d3b1 100644 --- a/docs/swarms/structs/autoscaler.md +++ b/docs/swarms/structs/autoscaler.md @@ -36,7 +36,9 @@ Initializes the `AutoScaler` with a predefined number of agents and sets up conf ```python from swarms import AutoScaler -scaler = AutoScaler(initial_agents=5, scale_up_factor=3, idle_threshold=0.1, busy_threshold=0.8) +scaler = AutoScaler( + initial_agents=5, scale_up_factor=3, idle_threshold=0.1, busy_threshold=0.8 +) ``` --- @@ -140,7 +142,9 @@ scaler.start() from swarms import AutoScaler # Initialize the scaler -auto_scaler = AutoScaler(initial_agents=15, scale_up_factor=2, idle_threshold=0.2, busy_threshold=0.7) +auto_scaler = AutoScaler( + initial_agents=15, scale_up_factor=2, idle_threshold=0.2, busy_threshold=0.7 +) # Start the monitoring and task processing auto_scaler.start() @@ -161,7 +165,6 @@ auto_scaler.start() for i in range(100): # Adding tasks auto_scaler.add_task(f"Task {i}") - ``` diff --git a/docs/swarms/structs/baseworkflow.md b/docs/swarms/structs/baseworkflow.md index ee048af8..2cb4b5eb 100644 --- a/docs/swarms/structs/baseworkflow.md +++ b/docs/swarms/structs/baseworkflow.md @@ -13,19 +13,19 @@ Base class for workflows. Source Code: ```python - class BaseWorkflow(BaseStructure): - """ - Base class for workflows. +class BaseWorkflow(BaseStructure): +""" +Base class for workflows. - Attributes: - task_pool (list): A list to store tasks. + Attributes: + task_pool (list): A list to store tasks. - Methods: - add(task: Task = None, tasks: List[Task] = None, *args, **kwargs): - Adds a task or a list of tasks to the task pool. - run(): - Abstract method to run the workflow. - """ + Methods: + add(task: Task = None, tasks: List[Task] = None, *args, **kwargs): + Adds a task or a list of tasks to the task pool. + run(): + Abstract method to run the workflow. +""" ``` For the usage examples and additional in-depth documentation please visit [BaseWorkflow](https://github.com/swarms-modules/structs/blob/main/baseworkflow.md#swarms-structs) diff --git a/docs/swarms/structs/groupchatmanager.md b/docs/swarms/structs/groupchatmanager.md index 623a1fbf..cb2ce035 100644 --- a/docs/swarms/structs/groupchatmanager.md +++ b/docs/swarms/structs/groupchatmanager.md @@ -24,7 +24,7 @@ agents = Agent() manager = GroupChatManager(groupchat, selector) # Call the group chat manager passing a specific chat task -result = manager('Discuss the agenda for the upcoming meeting') +result = manager("Discuss the agenda for the upcoming meeting") ``` Explanation: @@ -67,9 +67,7 @@ class GroupChatManager: Returns: str: The response from the group chat. """ - self.groupchat.messages.append( - {"role": self.selector.name, "content": task} - ) + self.groupchat.messages.append({"role": self.selector.name, "content": task}) for i in range(self.groupchat.max_round): speaker = self.groupchat.select_speaker( last_speaker=self.selector, selector=self.selector diff --git a/docs/swarms/structs/json.md b/docs/swarms/structs/json.md index 2a5040e5..a495a0ce 100644 --- a/docs/swarms/structs/json.md +++ b/docs/swarms/structs/json.md @@ -78,16 +78,17 @@ Suppose we have a JSON Schema in `config_schema.json` for application configurat Now we'll create a subclass `AppConfig` that uses this schema. 
```python -import json from swarms.structs import JSON + class AppConfig(JSON): def __init__(self, schema_path): super().__init__(schema_path) def validate(self, config_data): # Here we'll use a JSON Schema validation library like jsonschema - from jsonschema import validate, ValidationError + from jsonschema import ValidationError, validate + try: validate(instance=config_data, schema=self.schema) except ValidationError as e: @@ -95,15 +96,13 @@ class AppConfig(JSON): return False return True + # Main Example Usage if __name__ == "__main__": - config = { - "debug": True, - "window_size": [800, 600] - } + config = {"debug": True, "window_size": [800, 600]} - app_config = AppConfig('config_schema.json') + app_config = AppConfig("config_schema.json") if app_config.validate(config): print("Config is valid!") diff --git a/docs/swarms/structs/majorityvoting.md b/docs/swarms/structs/majorityvoting.md index 97d28847..4c7f7612 100644 --- a/docs/swarms/structs/majorityvoting.md +++ b/docs/swarms/structs/majorityvoting.md @@ -82,9 +82,11 @@ Executes the given task by all participating agents and aggregates the results t from swarms.structs.agent import Agent from swarms.structs.majority_voting import MajorityVoting + def create_agent(name): return Agent(name) + agents = [create_agent(name) for name in ["GPT-3", "Codex", "Tabnine"]] majority_voting = MajorityVoting(agents) result = majority_voting.run("What is the capital of France?") diff --git a/docs/swarms/structs/nonlinearworkflow.md b/docs/swarms/structs/nonlinearworkflow.md index 1974de24..09d00074 100644 --- a/docs/swarms/structs/nonlinearworkflow.md +++ b/docs/swarms/structs/nonlinearworkflow.md @@ -60,7 +60,7 @@ task3 = Task(llm, "Find a hotel in Paris") # Initialize the NonlinearWorkflow workflow = NonlinearWorkflow() # Add tasks to the workflow with dependencies -workflow.add(task1, task2.name) +workflow.add(task1, task2.name) workflow.add(task2, task3.name) workflow.add(task3, "OpenAIChat Initialization") # Execute the workflow @@ -82,7 +82,7 @@ task3 = Task(llm, "Find a hotel in Paris") # Initialize the NonlinearWorkflow workflow = NonlinearWorkflow() # Add tasks to the workflow with dependencies -workflow.add(task1) +workflow.add(task1) workflow.add(task2, task1.name) workflow.add(task3, task1.name, task2.name) # Execute the workflow diff --git a/docs/swarms/structs/sequential_workflow.md b/docs/swarms/structs/sequential_workflow.md index 6f6d8954..74ee1acb 100644 --- a/docs/swarms/structs/sequential_workflow.md +++ b/docs/swarms/structs/sequential_workflow.md @@ -310,9 +310,7 @@ from swarms.structs import Agent from swarms.structs.sequential_workflow import SequentialWorkflow # Example usage -api_key = ( - "" # Your actual API key here -) +api_key = "" # Your actual API key here # Initialize the language agent llm = OpenAIChat( @@ -350,9 +348,7 @@ from swarms.structs import Agent from swarms.structs.sequential_workflow import SequentialWorkflow # Example usage -api_key = ( - "" # Your actual API key here -) +api_key = "" # Your actual API key here # Initialize the language agent llm = OpenAIChat( @@ -393,9 +389,7 @@ from swarms.structs import Agent from swarms.structs.sequential_workflow import SequentialWorkflow # Example usage -api_key = ( - "" # Your actual API key here -) +api_key = "" # Your actual API key here # Initialize the language agent llm = OpenAIChat( @@ -436,9 +430,7 @@ from swarms.structs import Agent from swarms.structs.sequential_workflow import SequentialWorkflow # Example usage -api_key = ( - "" # Your actual API 
key here -) +api_key = "" # Your actual API key here # Initialize the language agent llm = OpenAIChat( diff --git a/docs/swarms/structs/stackoverflowswarm.md b/docs/swarms/structs/stackoverflowswarm.md index 5f5630df..c3e83b47 100644 --- a/docs/swarms/structs/stackoverflowswarm.md +++ b/docs/swarms/structs/stackoverflowswarm.md @@ -58,11 +58,13 @@ The design of the `StackOverflowSwarm` class is intended to allow easy tracking from swarms.structs.agent import Agent from swarms.structs.stack_overflow_swarm import StackOverflowSwarm + # Define custom Agents with some logic (placeholder for actual Agent implementation) class CustomAgent(Agent): def run(self, conversation, *args, **kwargs): return "This is a response from CustomAgent." + # Initialize agents agent1 = CustomAgent(ai_name="Agent1") agent2 = CustomAgent(ai_name="Agent2") diff --git a/docs/swarms/structs/stepinput.md b/docs/swarms/structs/stepinput.md index ad4be016..2230ccdf 100644 --- a/docs/swarms/structs/stepinput.md +++ b/docs/swarms/structs/stepinput.md @@ -12,10 +12,7 @@ The `StepInput` class is defined as follows: class StepInput(BaseModel): __root__: Any = Field( ..., - description=( - "Input parameters for the task step. Any value is" - " allowed." - ), + description=("Input parameters for the task step. Any value is" " allowed."), example='{\n"file_to_refactor": "models.py"\n}', ) ``` @@ -29,10 +26,7 @@ The `StepInput` class is designed to accept any input value, providing flexibili ```python from swarms.structs import StepInput -input_params = { - "file_to_refactor": "models.py", - "refactor_method": "code" -} +input_params = {"file_to_refactor": "models.py", "refactor_method": "code"} step_input = StepInput(__root__=input_params) ``` @@ -42,10 +36,7 @@ In this example, we import the `StepInput` class from the `swarms.structs` libra ```python from swarms.structs import StepInput -input_params = { - "input_path": "data.csv", - "output_path": "result.csv" -} +input_params = {"input_path": "data.csv", "output_path": "result.csv"} step_input = StepInput(__root__=input_params) ``` @@ -56,7 +47,7 @@ In this example, we again create an instance of `StepInput` by passing a diction from swarms.structs import StepInput file_path = "config.json" -with open(file_path, 'r') as f: +with open(file_path) as f: input_data = json.load(f) step_input = StepInput(__root__=input_data) diff --git a/docs/swarms/structs/swarmnetwork.md b/docs/swarms/structs/swarmnetwork.md index 2c2f74e5..efbdf279 100644 --- a/docs/swarms/structs/swarmnetwork.md +++ b/docs/swarms/structs/swarmnetwork.md @@ -28,6 +28,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork(agents=[agent]) swarm.add_task("task") @@ -41,6 +42,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork(agents=[agent]) await swarm.async_add_task("task") @@ -57,6 +59,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork(agents=[agent]) swarm.run_single_agent(agent_id, "task") @@ -72,6 +75,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = 
SwarmNetwork(agents=[agent]) swarm.run_many_agents("task") @@ -85,6 +89,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork(agents=[agent]) swarm.list_agents() @@ -98,6 +103,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork() swarm.add_agent(agent) @@ -111,6 +117,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork(agents=[agent]) swarm.remove_agent(agent_id) @@ -124,6 +131,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + swarm = SwarmNetwork() swarm.scale_up(num_agents=5) ``` @@ -136,6 +144,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + swarm = SwarmNetwork(agents=[agent1, agent2, agent3, agent4, agent5]) swarm.scale_down(num_agents=2) ``` @@ -146,6 +155,7 @@ The `SwarmNetwork` class has the following parameters: ```python from swarms.structs.agent import Agent from swarms.structs.swarm_net import SwarmNetwork + agent = Agent() swarm = SwarmNetwork(agents=[agent]) swarm.create_apis_for_agents() diff --git a/docs/swarms/structs/task.md b/docs/swarms/structs/task.md index 7e829b66..f6a72aef 100644 --- a/docs/swarms/structs/task.md +++ b/docs/swarms/structs/task.md @@ -8,8 +8,9 @@ ```python # Example 1: Creating and executing a Task -from swarms.structs import Task, Agent from swarms.models import OpenAIChat +from swarms.structs import Agent, Task + agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False) task = Task(agent=agent) task.execute("What's the weather in miami") diff --git a/docs/swarms/structs/taskinput.md b/docs/swarms/structs/taskinput.md index e7a0be68..8e9ed33f 100644 --- a/docs/swarms/structs/taskinput.md +++ b/docs/swarms/structs/taskinput.md @@ -20,11 +20,14 @@ The `TaskInput` class encapsulates the input parameters in a structured format. 
#### Usage Example 1: Using TaskInput for Debugging ```python from pydantic import BaseModel, Field + from swarms.structs import TaskInput + class DebugInput(TaskInput): debug: bool + # Creating an instance of DebugInput debug_params = DebugInput(__root__={"debug": True}) @@ -35,11 +38,14 @@ print(debug_params.debug) # Output: True #### Usage Example 2: Using TaskInput for Task Modes ```python from pydantic import BaseModel, Field + from swarms.structs import TaskInput + class ModeInput(TaskInput): mode: str + # Creating an instance of ModeInput mode_params = ModeInput(__root__={"mode": "benchmarks"}) @@ -50,12 +56,15 @@ print(mode_params.mode) # Output: benchmarks #### Usage Example 3: Using TaskInput with Arbitrary Parameters ```python from pydantic import BaseModel, Field + from swarms.structs import TaskInput + class ArbitraryInput(TaskInput): message: str quantity: int + # Creating an instance of ArbitraryInput arbitrary_params = ArbitraryInput(__root__={"message": "Hello, world!", "quantity": 5}) diff --git a/docs/swarms/structs/taskqueuebase.md b/docs/swarms/structs/taskqueuebase.md index a8d4c599..0feee648 100644 --- a/docs/swarms/structs/taskqueuebase.md +++ b/docs/swarms/structs/taskqueuebase.md @@ -7,13 +7,14 @@ The `swarms.structs` library is a key component of a multi-agent system's task m ## TaskQueueBase Class ```python -from abc import ABC, abstractmethod import threading +from abc import ABC, abstractmethod # Include any additional imports that are relevant to decorators and other classes such as Task and Agent if needed # Definition of the synchronized_queue decorator (if necessary) + class TaskQueueBase(ABC): def __init__(self): self.lock = threading.Lock() @@ -27,12 +28,12 @@ class TaskQueueBase(ABC): @abstractmethod def get_task(self, agent: Agent) -> Task: pass - + @synchronized_queue @abstractmethod def complete_task(self, task_id: str): pass - + @synchronized_queue @abstractmethod def reset_task(self, task_id: str): @@ -65,11 +66,12 @@ Below are three examples of how the `TaskQueueBase` class can be implemented and ```python # file: basic_queue.py -import threading -from swarms.structs import TaskQueueBase, Task, Agent # Assume synchronized_queue decorator is defined elsewhere -from decorators import synchronized_queue +from decorators import synchronized_queue + +from swarms.structs import Agent, Task, TaskQueueBase + class BasicTaskQueue(TaskQueueBase): def __init__(self): @@ -95,6 +97,7 @@ class BasicTaskQueue(TaskQueueBase): # Logic to reset the task pass + # Usage queue = BasicTaskQueue() # Add task, assuming Task object is created diff --git a/docs/swarms/tokenizers/basetokenizer.md b/docs/swarms/tokenizers/basetokenizer.md index 16db07fc..8047f301 100644 --- a/docs/swarms/tokenizers/basetokenizer.md +++ b/docs/swarms/tokenizers/basetokenizer.md @@ -25,6 +25,7 @@ The `BaseTokenizer` class provides the structure for creating tokenizers. It inc ```python from swarms.tokenizers import BaseTokenizer + class SimpleTokenizer(BaseTokenizer): def count_tokens(self, text: Union[str, List[dict]]) -> int: @@ -33,10 +34,11 @@ class SimpleTokenizer(BaseTokenizer): return len(text.split()) elif isinstance(text, list): # Assume list of dictionaries with 'token' key - return sum(len(item['token'].split()) for item in text) + return sum(len(item["token"].split()) for item in text) else: raise TypeError("Unsupported type for text") + # Usage example tokenizer = SimpleTokenizer(max_tokens=100) text = "This is an example sentence to tokenize." 
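```

To finish the truncated `SimpleTokenizer` example above, count the tokens in the sample text with the `count_tokens` method defined on the subclass; a minimal continuation:

```python
num_tokens = tokenizer.count_tokens(text)
print(num_tokens)  # 7 whitespace-separated tokens
```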
diff --git a/docs/swarms/tokenizers/coheretokenizer.md b/docs/swarms/tokenizers/coheretokenizer.md index 5a11b8ce..197346a3 100644 --- a/docs/swarms/tokenizers/coheretokenizer.md +++ b/docs/swarms/tokenizers/coheretokenizer.md @@ -57,7 +57,7 @@ def count_tokens(self, text: str | list) -> int: Args: text (str | list): The input text to tokenize. - + Returns: int: The number of tokens in the text. @@ -82,13 +82,14 @@ First, the Cohere client must be initialized and passed in to create an instance ```python from cohere import Client + from swarms.tokenizers import CohereTokenizer # Initialize Cohere client with your API key -cohere_client = Client('your-api-key') +cohere_client = Client("your-api-key") # Instantiate the tokenizer -tokenizer = CohereTokenizer(model='your-model-name', client=cohere_client) +tokenizer = CohereTokenizer(model="your-model-name", client=cohere_client) ``` ### Count Tokens Example 1 diff --git a/docs/swarms/tokenizers/huggingfacetokenizer.md b/docs/swarms/tokenizers/huggingfacetokenizer.md index 8cf7fd77..9330b3be 100644 --- a/docs/swarms/tokenizers/huggingfacetokenizer.md +++ b/docs/swarms/tokenizers/huggingfacetokenizer.md @@ -76,7 +76,7 @@ Tokenizes given text when the object is called like a function. from swarms.tokenizers import HuggingFaceTokenizer # Initialize the tokenizer with the path to your tokenizer model. -tokenizer = HuggingFaceTokenizer('/path/to/your/model_dir') +tokenizer = HuggingFaceTokenizer("/path/to/your/model_dir") ``` ### 2. Encoding Text diff --git a/docs/swarms/tokenizers/openaitokenizer.md b/docs/swarms/tokenizers/openaitokenizer.md index 9e051c32..94918261 100644 --- a/docs/swarms/tokenizers/openaitokenizer.md +++ b/docs/swarms/tokenizers/openaitokenizer.md @@ -50,7 +50,7 @@ Given the extensive nature of this class, several examples are provided for each ```python from swarms.tokenizers import OpenAITokenizer -tokenizer = OpenAITokenizer(model='gpt-4') +tokenizer = OpenAITokenizer(model="gpt-4") ``` This example creates a new instance of `OpenAITokenizer` set to work with the GPT-4 model. @@ -61,7 +61,7 @@ This example creates a new instance of `OpenAITokenizer` set to work with the GP text = "Hello, this is an example text to tokenize." # Initialize the tokenizer -tokenizer = OpenAITokenizer(model='gpt-4') +tokenizer = OpenAITokenizer(model="gpt-4") # Count tokens num_tokens = tokenizer.count_tokens(text) @@ -78,7 +78,7 @@ messages = [ {"name": "Bob", "message": "I'm good! Just working on some code."}, ] -tokenizer = OpenAITokenizer(model='gpt-3.5-turbo') +tokenizer = OpenAITokenizer(model="gpt-3.5-turbo") # Count tokens for a list of messages num_tokens = tokenizer.len(messages, model="gpt-3.5-turbo-0613") diff --git a/docs/swarms/tokenizers/sentencepiecetokenizer.md b/docs/swarms/tokenizers/sentencepiecetokenizer.md index 390dbb07..ac6f2a2f 100644 --- a/docs/swarms/tokenizers/sentencepiecetokenizer.md +++ b/docs/swarms/tokenizers/sentencepiecetokenizer.md @@ -14,7 +14,7 @@ In `SentencePieceTokenizer`, the tokenization process is language-agnostic and e class SentencePieceTokenizer: """ Tokenizer of sentencepiece. 
- + Args: model_file (str): the path of the tokenizer model """ @@ -45,7 +45,7 @@ Parameter | Type | Description ```python from swarms.tokenizers import SentencePieceTokenizer -tokenizer = SentencePieceTokenizer(model_file='your_model.model') +tokenizer = SentencePieceTokenizer(model_file="your_model.model") ``` ### Properties: Vocabulary Information diff --git a/docs/swarms/utils/check_device.md b/docs/swarms/utils/check_device.md index bdb8c780..a944dc1f 100644 --- a/docs/swarms/utils/check_device.md +++ b/docs/swarms/utils/check_device.md @@ -36,16 +36,18 @@ This function does not take any mandatory argument. However, it supports optiona ### Example 1: Basic Usage ```python -import torch import logging + +import torch + from swarms.utils import check_device # Basic usage device = check_device( - log_level=logging.INFO, - memory_threshold=0.8, - capability_threshold=3.5, - return_type="list" + log_level=logging.INFO, + memory_threshold=0.8, + capability_threshold=3.5, + return_type="list", ) ``` @@ -53,24 +55,24 @@ device = check_device( ```python import torch -import logging + from swarms.utils import check_device # When CUDA is not available device = check_device() -print(device) # If CUDA is not available it should return torch.device('cpu') +print(device) # If CUDA is not available it should return torch.device('cpu') ``` ### Example 3: Multiple GPU Available ```python import torch -import logging + from swarms.utils import check_device # When multiple GPUs are available device = check_device() -print(device) # Should return a list of available GPU devices +print(device) # Should return a list of available GPU devices ``` ## Tips and Additional Information diff --git a/docs/swarms/utils/extract_code_from_markdown.md b/docs/swarms/utils/extract_code_from_markdown.md index f6f76835..fdef5018 100644 --- a/docs/swarms/utils/extract_code_from_markdown.md +++ b/docs/swarms/utils/extract_code_from_markdown.md @@ -57,14 +57,13 @@ Below are three examples of how you might use this function: Extracting code blocks from a simple markdown string. ```python -import re from swarms.utils import extract_code_from_markdown -markdown_string = '''# Example +markdown_string = """# Example This is an example of a code block: ```python print("Hello World!") -``` ''' +``` """ print(extract_code_from_markdown(markdown_string)) ``` @@ -75,13 +74,15 @@ Extracting code blocks from a markdown file. ```python import re + def extract_code_from_markdown(markdown_content: str) -> str: pattern = r"```(?:\w+\n)?(.*?)```" matches = re.findall(pattern, markdown_content, re.DOTALL) return "\n".join(code.strip() for code in matches) + # Assume that 'example.md' contains multiple code blocks -with open('example.md', 'r') as file: +with open("example.md") as file: markdown_content = file.read() print(extract_code_from_markdown(markdown_content)) ``` @@ -93,17 +94,20 @@ Using the function in a pipeline to extract and then analyze code blocks. 
```python import re + def extract_code_from_markdown(markdown_content: str) -> str: pattern = r"```(?:\w+\n)?(.*?)```" matches = re.findall(pattern, markdown_content, re.DOTALL) return "\n".join(code.strip() for code in matches) + def analyze_code_blocks(code: str): # Add your analysis logic here - pass + pass + # Assume that 'example.md' contains multiple code blocks -with open('example.md', 'r') as file: +with open("example.md") as file: markdown_content = file.read() code_blocks = extract_code_from_markdown(markdown_content) analyze_code_blocks(code_blocks) diff --git a/docs/swarms/utils/find_image_path.md b/docs/swarms/utils/find_image_path.md index 59c9c127..844cbe78 100644 --- a/docs/swarms/utils/find_image_path.md +++ b/docs/swarms/utils/find_image_path.md @@ -40,15 +40,9 @@ The function `find_image_path` performs text parsing and pattern recognition to ```python def find_image_path(text): pattern = r"([A-Za-z]:\\[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))|(/[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))" - matches = [ - match.group() - for match in re.finditer(pattern, text) - if match.group() - ] + matches = [match.group() for match in re.finditer(pattern, text) if match.group()] matches += [match.replace("\\", "") for match in matches if match] - existing_paths = [ - match for match in matches if os.path.exists(match) - ] + existing_paths = [match for match in matches if os.path.exists(match)] return max(existing_paths, key=len) if existing_paths else None ``` @@ -75,7 +69,9 @@ Consider the case where the text has multiple image paths. from swarms.utils import find_image_path text = "Here is an image path: /home/user/image1.png. Here is another one: C:\\Users\\User\\Documents\\image2.jpeg" -print(find_image_path(text)) # Outputs: the longest image path (depends on your file system and existing files) +print( + find_image_path(text) +) # Outputs: the longest image path (depends on your file system and existing files) ``` **Example 3:** diff --git a/docs/swarms/utils/limit_tokens_from_string.md b/docs/swarms/utils/limit_tokens_from_string.md index b096ebad..bc2cf8cf 100644 --- a/docs/swarms/utils/limit_tokens_from_string.md +++ b/docs/swarms/utils/limit_tokens_from_string.md @@ -73,7 +73,7 @@ from swarms.utils import limit_tokens_from_string string = "In case the method does not find the specified model, it will fall back to gpt2 model." # model -model = "gpt-4" +model = "gpt-4" output = limit_tokens_from_string(string, model=model) ``` diff --git a/docs/swarms/utils/load_model_torch.md b/docs/swarms/utils/load_model_torch.md index ddcd7ee6..7effabb6 100644 --- a/docs/swarms/utils/load_model_torch.md +++ b/docs/swarms/utils/load_model_torch.md @@ -50,12 +50,14 @@ This function can be used directly inside your code as shown in the following ex Loading a model without specifying a device results in the function choosing the most optimal available device automatically. 
```python -from swarms.utils import load_model_torch import torch.nn as nn +from swarms.utils import load_model_torch + # Assume `mymodel.pth` is in the current directory model_path = "./mymodel.pth" + # Define your model architecture if the model file only contains state dict class MyModel(nn.Module): def __init__(self): @@ -65,6 +67,7 @@ class MyModel(nn.Module): def forward(self, x): return self.linear(x) + model = MyModel() # Load the model diff --git a/docs/swarms/utils/math_eval.md b/docs/swarms/utils/math_eval.md index 691089f8..19eb9517 100644 --- a/docs/swarms/utils/math_eval.md +++ b/docs/swarms/utils/math_eval.md @@ -15,6 +15,8 @@ Let's say you have two functions: `ground_truth` and `generated_func`, that have @math_eval(ground_truth, generated_func) def test_func(x): return x + + result1, result2 = test_func(5) print(f"Result from ground_truth: {result1}") print(f"Result from generated_func: {result2}") @@ -46,6 +48,7 @@ Here's how to implement the `math_eval` decorator: import functools import logging + def math_eval(func1, func2): """Math evaluation decorator.""" @@ -65,9 +68,7 @@ def math_eval(func1, func2): result2 = None if result1 != result2: - logging.warning( - f"Outputs do not match: {result1} != {result2}" - ) + logging.warning(f"Outputs do not match: {result1} != {result2}") return result1, result2 diff --git a/docs/swarms/utils/metrics_decorator.md b/docs/swarms/utils/metrics_decorator.md index aeafe151..17850ba1 100644 --- a/docs/swarms/utils/metrics_decorator.md +++ b/docs/swarms/utils/metrics_decorator.md @@ -70,6 +70,7 @@ def text_generator(self, text: str): # language generation implementation goes here return tokens + # Instantiate the class and call the decorated function obj = ClassName() obj.text_generator("Hello, world!") diff --git a/docs/swarms/utils/pdf_to_text.md b/docs/swarms/utils/pdf_to_text.md index e5b8dce0..3ec73039 100644 --- a/docs/swarms/utils/pdf_to_text.md +++ b/docs/swarms/utils/pdf_to_text.md @@ -54,7 +54,7 @@ Here is an example of how to use `pdf_to_text`: ```python # Define the path to the pdf file -pdf_path = 'sample.pdf' +pdf_path = "sample.pdf" # Use the function to extract text text = pdf_to_text(pdf_path) diff --git a/docs/swarms/utils/prep_torch_inference.md b/docs/swarms/utils/prep_torch_inference.md index 68598fa8..0fde2503 100644 --- a/docs/swarms/utils/prep_torch_inference.md +++ b/docs/swarms/utils/prep_torch_inference.md @@ -56,7 +56,8 @@ Here are some examples of how you can use the `prep_torch_inference` method. 
Bef ```python import torch -from swarms.utils import prep_torch_inference, load_model_torch + +from swarms.utils import load_model_torch, prep_torch_inference ``` ### Example 1: Load a model for inference on CPU diff --git a/docs/swarms/utils/print_class_parameters.md b/docs/swarms/utils/print_class_parameters.md index 3c09578f..84e0104f 100644 --- a/docs/swarms/utils/print_class_parameters.md +++ b/docs/swarms/utils/print_class_parameters.md @@ -94,16 +94,16 @@ def print_class_parameters(cls, api_format: bool = False): if api_format: param_dict = {} for name, param in params.items(): - if name == "self": - continue - param_dict[name] = str(param.annotation) + if name == "self": + continue + param_dict[name] = str(param.annotation) return param_dict # Print the parameters for name, param in params.items(): - if name == "self": - continue - print(f"Parameter: {name}, Type: {param.annotation}") + if name == "self": + continue + print(f"Parameter: {name}, Type: {param.annotation}") except Exception as e: print(f"An error occurred while inspecting the class: {e}") diff --git a/docs/swarms/workers/base.md b/docs/swarms/workers/base.md index 8991210b..9da45ba3 100644 --- a/docs/swarms/workers/base.md +++ b/docs/swarms/workers/base.md @@ -37,43 +37,46 @@ class AbstractWorker: Args: name (str): Name of the worker. """ - + @property def name(self): """Get the name of the worker.""" - pass def run(self, task: str): """Run the worker agent once.""" - pass - def send(self, message: Union[Dict, str], recipient, request_reply: Optional[bool] = None): + def send( + self, message: Union[Dict, str], recipient, request_reply: Optional[bool] = None + ): """Send a message to another worker.""" - pass - async def a_send(self, message: Union[Dict, str], recipient, request_reply: Optional[bool] = None): + async def a_send( + self, message: Union[Dict, str], recipient, request_reply: Optional[bool] = None + ): """Send a message to another worker asynchronously.""" - pass - def receive(self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None): + def receive( + self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None + ): """Receive a message from another worker.""" - pass - async def a_receive(self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None): + async def a_receive( + self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None + ): """Receive a message from another worker asynchronously.""" - pass def reset(self): """Reset the worker.""" - pass - def generate_reply(self, messages: Optional[List[Dict]] = None, sender=None, **kwargs) -> Union[str, Dict, None]: + def generate_reply( + self, messages: Optional[List[Dict]] = None, sender=None, **kwargs + ) -> Union[str, Dict, None]: """Generate a reply based on received messages.""" - pass - async def a_generate_reply(self, messages: Optional[List[Dict]] = None, sender=None, **kwargs) -> Union[str, Dict, None]: + async def a_generate_reply( + self, messages: Optional[List[Dict]] = None, sender=None, **kwargs + ) -> Union[str, Dict, None]: """Generate a reply based on received messages asynchronously.""" - pass ``` ### 2.2 Attributes @@ -121,6 +124,7 @@ class MyWorker(AbstractWorker): def run(self, task: str): print(f"{self.name} is performing task: {task}") + worker = MyWorker("Worker1") worker.run("Collect data") ``` @@ -155,6 +159,7 @@ The `a_send()` method is an asynchronous version of the `send()` method, allowin ```python import asyncio + async def main(): worker1 = 
AbstractWorker("Worker1") worker2 = AbstractWorker("Worker2") @@ -162,6 +167,7 @@ async def main(): message = "Hello, Worker2!" await worker1.a_send(message, worker2) + loop = asyncio.get_event_loop() loop.run_until_complete(main()) ``` @@ -208,6 +214,7 @@ The `a_receive()` method is an asynchronous version of the `receive()` method, a ```python import asyncio + async def main(): worker1 = AbstractWorker("Worker1") worker2 = AbstractWorker("Worker2") @@ -218,6 +225,7 @@ async def main(): await worker1.a_receive(message1, worker2) await worker1.a_receive(message2, worker2) + loop = asyncio.get_event_loop() loop.run_until_complete(main()) ``` @@ -233,6 +241,7 @@ class MyWorker(AbstractWorker): def reset(self): print(f"{self.name} has been reset.") + worker = MyWorker("Worker1") worker.reset() ``` @@ -253,13 +262,16 @@ The `generate_reply()` method is a placeholder for generating a reply based on r ```python class MyWorker(AbstractWorker): - def generate_reply(self, messages: Optional[List[Dict]] = None, sender=None, **kwargs) -> Union[str, Dict, None]: + def generate_reply( + self, messages: Optional[List[Dict]] = None, sender=None, **kwargs + ) -> Union[str, Dict, None]: if messages: # Generate a reply based on received messages return f"Received {len(messages)} messages from {sender.name}." else: return None + worker1 = MyWorker("Worker1") worker2 = MyWorker("Worker2") @@ -284,6 +296,7 @@ The `a_generate_reply()` method is an asynchronous version of the `generate_repl ```python import asyncio + async def main(): worker1 = AbstractWorker("Worker1") worker2 = AbstractWorker("Worker2") @@ -294,6 +307,7 @@ async def main(): if reply: print(f"{worker2.name} generated a reply: {reply}") + loop = asyncio.get_event_loop() loop.run_until_complete(main()) ``` @@ -312,12 +326,16 @@ Start by creating a custom worker class that inherits from `AbstractWorker`. Def class CustomWorker(AbstractWorker): def run(self, task: str): print(f"{self.name} is performing task: {task}") - - def receive(self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None): + + def receive( + self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None + ): if isinstance(message, str): print(f"{self.name} received a text message from {sender.name}: {message}") elif isinstance(message, dict): - print(f"{self.name} received a dictionary message from {sender.name}: {message}") + print( + f"{self.name} received a dictionary message from {sender.name}: {message}" + ) ``` ### Step 2: Create Custom Worker Instances @@ -355,7 +373,9 @@ Customize the `generate_reply()` method to allow your workers to generate replie ```python class CustomWorker(AbstractWorker): - def generate_reply(self, messages: Optional[List[Dict]] = None, sender=None, **kwargs) -> Union[str, Dict, None]: + def generate_reply( + self, messages: Optional[List[Dict]] = None, sender=None, **kwargs + ) -> Union[str, Dict, None]: if messages: # Generate a reply based on received messages return f"Received {len(messages)} messages from {sender.name}." diff --git a/docs/swarms/workers/index.md b/docs/swarms/workers/index.md index 9cf75e8b..3662fd8a 100644 --- a/docs/swarms/workers/index.md +++ b/docs/swarms/workers/index.md @@ -49,11 +49,11 @@ Makes the Worker class callable. 
When an instance of the class is called, it wil ### **Example 1**: Basic usage with default parameters: ```python -from swarms.models import OpenAIChat from swarms import Worker +from swarms.models import OpenAIChat llm = OpenAIChat( - #enter your api key + # enter your api key openai_api_key="", temperature=0.5, ) @@ -195,17 +195,16 @@ response = node.run(task) # Print the response print(response) - ``` ### **Example 3**: Usage with human in the loop: ```python -from swarms.models import OpenAIChat from swarms import Worker +from swarms.models import OpenAIChat llm = OpenAIChat( - #enter your api key + # enter your api key openai_api_key="", temperature=0.5, ) @@ -223,7 +222,6 @@ node = Worker( task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times." response = node.run(task) print(response) - ``` ## **Mathematical Description**: diff --git a/playground/agents/multi_modal_auto_agent_example.py b/playground/agents/multi_modal_auto_agent_example.py index d32a6221..4204f48c 100644 --- a/playground/agents/multi_modal_auto_agent_example.py +++ b/playground/agents/multi_modal_auto_agent_example.py @@ -1,6 +1,8 @@ # Description: This is an example of how to use the Agent class to run a multi-modal workflow import os + from dotenv import load_dotenv + from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.structs import Agent diff --git a/playground/agents/multion_agent.py b/playground/agents/multion_agent.py index 230f098b..a8f5175d 100644 --- a/playground/agents/multion_agent.py +++ b/playground/agents/multion_agent.py @@ -1,7 +1,8 @@ import multion -from swarms.structs.concurrent_workflow import ConcurrentWorkflow + from swarms.models.base_llm import AbstractLLM from swarms.structs.agent import Agent +from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.task import Task diff --git a/playground/agents/perimeter_defense_agent.py b/playground/agents/perimeter_defense_agent.py index 3f5480c6..d235fa22 100644 --- a/playground/agents/perimeter_defense_agent.py +++ b/playground/agents/perimeter_defense_agent.py @@ -1,8 +1,10 @@ import os + from dotenv import load_dotenv + +import swarms.prompts.security_team as stsp from swarms.models import GPT4VisionAPI from swarms.structs import Agent -import swarms.prompts.security_team as stsp # Load environment variables and initialize the Vision API load_dotenv() diff --git a/playground/agents/simple_agent_example.py b/playground/agents/simple_agent_example.py index 5d9d57ed..b79b8f59 100644 --- a/playground/agents/simple_agent_example.py +++ b/playground/agents/simple_agent_example.py @@ -3,12 +3,11 @@ import os from dotenv import load_dotenv from swarms import ( - OpenAIChat, Conversation, + OpenAIChat, detect_markdown, extract_code_from_markdown, ) - from swarms.tools.code_executor import CodeExecutor conv = Conversation( diff --git a/playground/agents/tool_agent.py b/playground/agents/tool_agent.py index 0a95f42c..a6445b39 100644 --- a/playground/agents/tool_agent.py +++ b/playground/agents/tool_agent.py @@ -1,5 +1,6 @@ # Import necessary libraries from transformers import AutoModelForCausalLM, AutoTokenizer + from swarms import ToolAgent # Load the pre-trained model and tokenizer diff --git a/playground/agents/worker_example.py b/playground/agents/worker_example.py index 9e215e83..a2117e46 100644 --- a/playground/agents/worker_example.py +++ b/playground/agents/worker_example.py @@ -1,8 +1,10 @@ # Importing 
necessary modules import os + from dotenv import load_dotenv -from swarms.agents.worker_agent import Worker + from swarms import OpenAIChat +from swarms.agents.worker_agent import Worker # Loading environment variables from .env file load_dotenv() diff --git a/playground/demos/accountant_team/account_team2_example.py b/playground/demos/accountant_team/account_team2_example.py index 1b9d3659..6ad030a9 100644 --- a/playground/demos/accountant_team/account_team2_example.py +++ b/playground/demos/accountant_team/account_team2_example.py @@ -1,5 +1,7 @@ import os + from dotenv import load_dotenv + from swarms.models import Anthropic, OpenAIChat from swarms.prompts.accountant_swarm_prompts import ( DECISION_MAKING_PROMPT, diff --git a/playground/demos/ad_gen/ad_gen_example.py b/playground/demos/ad_gen/ad_gen_example.py index b665b63a..978ab502 100644 --- a/playground/demos/ad_gen/ad_gen_example.py +++ b/playground/demos/ad_gen/ad_gen_example.py @@ -1,9 +1,11 @@ -import random import os +import random + from dotenv import load_dotenv + from swarms.models import OpenAIChat -from swarms.structs import Agent from swarms.models.stable_diffusion import StableDiffusion +from swarms.structs import Agent load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/demos/assembly/assembly_example.py b/playground/demos/assembly/assembly_example.py index 704c80d4..7ac97ab0 100644 --- a/playground/demos/assembly/assembly_example.py +++ b/playground/demos/assembly/assembly_example.py @@ -1,5 +1,5 @@ -from swarms.structs import Agent from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarms.structs import Agent llm = GPT4VisionAPI() diff --git a/playground/demos/autotemp/autotemp_example.py b/playground/demos/autotemp/autotemp_example.py index baf8f091..f086f112 100644 --- a/playground/demos/autotemp/autotemp_example.py +++ b/playground/demos/autotemp/autotemp_example.py @@ -1,4 +1,5 @@ import re + from swarms.models.openai_models import OpenAIChat diff --git a/playground/demos/autotemp/blog_gen_example.py b/playground/demos/autotemp/blog_gen_example.py index e11a1521..fe2a2317 100644 --- a/playground/demos/autotemp/blog_gen_example.py +++ b/playground/demos/autotemp/blog_gen_example.py @@ -1,7 +1,9 @@ import os + +from autotemp import AutoTemp from termcolor import colored + from swarms.models import OpenAIChat -from autotemp import AutoTemp from swarms.structs import SequentialWorkflow diff --git a/playground/demos/developer_swarm/main_example.py b/playground/demos/developer_swarm/main_example.py index 18c0a346..0a2e2a95 100644 --- a/playground/demos/developer_swarm/main_example.py +++ b/playground/demos/developer_swarm/main_example.py @@ -6,7 +6,7 @@ This is a simple example of how to use the swarms library to create a swarm of d The swarm is composed of two agents: - Documentation agent: writes documentation for a given code snippet. - Tests agent: writes tests for a given code snippet. - + The swarm is initialized with a language model that is used by the agents to generate text. In this example, we use the OpenAI GPT-3 language model. 
Agent: @@ -14,6 +14,7 @@ Documentation agent -> Tests agent """ + import os from dotenv import load_dotenv diff --git a/playground/demos/education/education_example.py b/playground/demos/education/education_example.py index 266cede9..31c08f0d 100644 --- a/playground/demos/education/education_example.py +++ b/playground/demos/education/education_example.py @@ -1,8 +1,10 @@ import os + from dotenv import load_dotenv -from swarms.models import OpenAIChat -from swarms import Agent, SequentialWorkflow + import swarms.prompts.education as edu_prompts +from swarms import Agent, SequentialWorkflow +from swarms.models import OpenAIChat # Load environment variables load_dotenv() diff --git a/playground/demos/gemini_benchmarking/gemini_chat_example.py b/playground/demos/gemini_benchmarking/gemini_chat_example.py index 6d9dc7ae..2ea6a900 100644 --- a/playground/demos/gemini_benchmarking/gemini_chat_example.py +++ b/playground/demos/gemini_benchmarking/gemini_chat_example.py @@ -1,5 +1,7 @@ import os + from dotenv import load_dotenv + from swarms.models.gemini import Gemini from swarms.prompts.react import react_prompt diff --git a/playground/demos/gemini_benchmarking/gemini_react_example.py b/playground/demos/gemini_benchmarking/gemini_react_example.py index 022405e9..37765baf 100644 --- a/playground/demos/gemini_benchmarking/gemini_react_example.py +++ b/playground/demos/gemini_benchmarking/gemini_react_example.py @@ -1,5 +1,7 @@ import os + from dotenv import load_dotenv + from swarms.models.gemini import Gemini from swarms.prompts.react import react_prompt diff --git a/playground/demos/grupa/app_example.py b/playground/demos/grupa/app_example.py index 3ab52e22..ff5fc27d 100644 --- a/playground/demos/grupa/app_example.py +++ b/playground/demos/grupa/app_example.py @@ -1,12 +1,12 @@ import os from dotenv import load_dotenv +from termcolor import colored from swarms.models import OpenAIChat from swarms.prompts.code_interpreter import CODE_INTERPRETER +from swarms.prompts.programming import DOCUMENTATION_SOP, TEST_SOP from swarms.structs import Agent -from swarms.prompts.programming import TEST_SOP, DOCUMENTATION_SOP -from termcolor import colored load_dotenv() diff --git a/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py b/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py index 05cd4fff..cce61fba 100644 --- a/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py +++ b/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py @@ -1,9 +1,8 @@ -from swarms.structs import Agent from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1, ) - +from swarms.structs import Agent llm = GPT4VisionAPI() diff --git a/playground/demos/langchain_example/langchain_example.py b/playground/demos/langchain_example/langchain_example.py index 803e7857..0e47684e 100644 --- a/playground/demos/langchain_example/langchain_example.py +++ b/playground/demos/langchain_example/langchain_example.py @@ -1,8 +1,10 @@ import os + from dotenv import load_dotenv -from swarms import Agent from langchain.llms import OpenAIChat +from swarms import Agent + # Loading environment variables from .env file load_dotenv() diff --git a/playground/demos/logistics/logistics_example.py b/playground/demos/logistics/logistics_example.py index 108ec702..48d8b9ce 100644 --- a/playground/demos/logistics/logistics_example.py +++ b/playground/demos/logistics/logistics_example.py @@ -1,16 +1,18 @@ -from 
swarms.structs import Agent import os + from dotenv import load_dotenv + from swarms.models import GPT4VisionAPI from swarms.prompts.logistics import ( + Efficiency_Agent_Prompt, Health_Security_Agent_Prompt, - Quality_Control_Agent_Prompt, Productivity_Agent_Prompt, + Quality_Control_Agent_Prompt, Safety_Agent_Prompt, Security_Agent_Prompt, Sustainability_Agent_Prompt, - Efficiency_Agent_Prompt, ) +from swarms.structs import Agent # from swarms.utils.banana_wrapper import banana diff --git a/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py index 74be8d5a..007776ac 100644 --- a/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py +++ b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py @@ -1,6 +1,5 @@ -from swarms.structs import Agent from swarms.models.gpt4_vision_api import GPT4VisionAPI - +from swarms.structs import Agent llm = GPT4VisionAPI() diff --git a/playground/demos/multimodal_tot/main_example.py b/playground/demos/multimodal_tot/main_example.py index 2d5ed653..2a0494dc 100644 --- a/playground/demos/multimodal_tot/main_example.py +++ b/playground/demos/multimodal_tot/main_example.py @@ -2,8 +2,8 @@ Multi Modal tree of thoughts that leverages the GPT-4 language model and the Stable Diffusion model to generate a multimodal output and evaluate the output based a metric from 0.0 to 1.0 and then run a search algorithm using DFS and BFS and return the best output. - - + + task: Generate an image of a swarm of bees -> Image generator -> GPT4V evaluates the img from 0.0 to 1.0 -> DFS/BFS -> return the best output @@ -16,10 +16,12 @@ task: Generate an image of a swarm of bees -> Image generator -> GPT4V evaluates """ import os + from dotenv import load_dotenv +from termcolor import colored + from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.models.stable_diffusion import StableDiffusion -from termcolor import colored # Load the environment variables load_dotenv() diff --git a/playground/demos/nutrition/nutrition_example.py b/playground/demos/nutrition/nutrition_example.py index 428560e3..b4331db6 100644 --- a/playground/demos/nutrition/nutrition_example.py +++ b/playground/demos/nutrition/nutrition_example.py @@ -1,7 +1,9 @@ -import os import base64 +import os + import requests from dotenv import load_dotenv + from swarms.models import OpenAIChat from swarms.structs import Agent diff --git a/playground/demos/personal_assistant/better_communication_example.py b/playground/demos/personal_assistant/better_communication_example.py index c6e79eb7..e0ff75cc 100644 --- a/playground/demos/personal_assistant/better_communication_example.py +++ b/playground/demos/personal_assistant/better_communication_example.py @@ -1,5 +1,5 @@ -import time import os +import time import pygame import speech_recognition as sr diff --git a/playground/demos/personal_stylist/personal_stylist_example.py b/playground/demos/personal_stylist/personal_stylist_example.py index b8641aa3..dde64cb7 100644 --- a/playground/demos/personal_stylist/personal_stylist_example.py +++ b/playground/demos/personal_stylist/personal_stylist_example.py @@ -1,14 +1,16 @@ -from swarms.structs import Agent import os + from dotenv import load_dotenv + from swarms.models import GPT4VisionAPI from swarms.prompts.personal_stylist import ( - HAIRCUT_STYLIST_AGENT_PROMPT, - MAKEUP_STYLIST_AGENT_PROMPT, + ACCESSORIES_STYLIST_AGENT_PROMPT, 
BEARD_STYLIST_AGENT_PROMPT, CLOTHING_STYLIST_AGENT_PROMPT, - ACCESSORIES_STYLIST_AGENT_PROMPT, + HAIRCUT_STYLIST_AGENT_PROMPT, + MAKEUP_STYLIST_AGENT_PROMPT, ) +from swarms.structs import Agent # Load environment variables load_dotenv() diff --git a/playground/demos/positive_med/positive_med_example.py b/playground/demos/positive_med/positive_med_example.py index b92b9586..09cbb411 100644 --- a/playground/demos/positive_med/positive_med_example.py +++ b/playground/demos/positive_med/positive_med_example.py @@ -20,6 +20,7 @@ Distribution Agent: - Optimize writer prompt to create longer and more enjoyeable blogs - Use Local Models like Storywriter """ + import os from termcolor import colored diff --git a/playground/demos/security_team/IMG_1625.MOV b/playground/demos/security_team/IMG_1625.MOV new file mode 100644 index 00000000..9bbb3574 Binary files /dev/null and b/playground/demos/security_team/IMG_1625.MOV differ diff --git a/playground/demos/security_team/security_team_example.py b/playground/demos/security_team/security_team_example.py index f00b0295..d391fe32 100644 --- a/playground/demos/security_team/security_team_example.py +++ b/playground/demos/security_team/security_team_example.py @@ -1,8 +1,11 @@ import os + from dotenv import load_dotenv +from termcolor import colored + +import swarms.prompts.security_team as stsp from swarms.models import GPT4VisionAPI from swarms.structs import Agent -import swarms.prompts.security_team as stsp # Load environment variables and initialize the Vision API load_dotenv() @@ -11,25 +14,21 @@ api_key = os.getenv("OPENAI_API_KEY") llm = GPT4VisionAPI(openai_api_key=api_key) # Image for analysis -img = "bank_robbery.jpg" +# img = "IMG_1617.jpeg" +img = "ubase1.jpeg" +img2 = "ubase2.jpeg" # Initialize agents with respective prompts for security tasks crowd_analysis_agent = Agent( + agent_name="Crowd Analysis Agent", llm=llm, sop=stsp.CROWD_ANALYSIS_AGENT_PROMPT, max_loops=1, multi_modal=True, ) -# Facial Recognition Agent is currently not operational -# facial_recognition_agent = Agent( -# llm=llm, -# sop=stsp.FACIAL_RECOGNITION_AGENT_PROMPT, -# max_loops=1, -# multi_modal=True, -# ) - weapon_detection_agent = Agent( + agent_name="Weapon Detection Agent", llm=llm, sop=stsp.WEAPON_DETECTION_AGENT_PROMPT, max_loops=1, @@ -37,6 +36,7 @@ weapon_detection_agent = Agent( ) surveillance_monitoring_agent = Agent( + agent_name="Surveillance Monitoring Agent", llm=llm, sop=stsp.SURVEILLANCE_MONITORING_AGENT_PROMPT, max_loops=1, @@ -44,37 +44,27 @@ surveillance_monitoring_agent = Agent( ) emergency_response_coordinator = Agent( + agent_name="Emergency Response Coordinator", # "Emergency Response Coordinator llm=llm, sop=stsp.EMERGENCY_RESPONSE_COORDINATOR_PROMPT, max_loops=1, multi_modal=True, ) -# Run agents with respective tasks on the same image -crowd_analysis = crowd_analysis_agent.run( - "Analyze the crowd dynamics in the scene", img -) - -# Facial Recognition Agent is currently not operational -# facial_recognition_analysis = facial_recognition_agent.run( -# "Identify any known individuals in the scene", img -# ) - +colored("Security Team Analysis", "green") +colored("Inspect the scene for any potential threats", "green") +colored("Weapon Detection Analysis", "green") weapon_detection_analysis = weapon_detection_agent.run( "Inspect the scene for any potential threats", img ) + +colored("Surveillance Monitoring Analysis", "cyan") surveillance_monitoring_analysis = surveillance_monitoring_agent.run( "Monitor the overall scene for unusual activities", img ) 
+colored("Emergency Response Analysis", "red") emergency_response_analysis = emergency_response_coordinator.run( "Develop a response plan based on the scene analysis", img ) - -# Process and output results for each task -# Example output (uncomment to use): -# print(f"Crowd Analysis: {crowd_analysis}") -# print(f"Weapon Detection Analysis: {weapon_detection_analysis}") -# print(f"Surveillance Monitoring Analysis: {surveillance_monitoring_analysis}") -# print(f"Emergency Response Analysis: {emergency_response_analysis}") diff --git a/playground/demos/simple_rag/simple_rag.py b/playground/demos/simple_rag/simple_rag.py index 129d59c4..c6ffbe15 100644 --- a/playground/demos/simple_rag/simple_rag.py +++ b/playground/demos/simple_rag/simple_rag.py @@ -1,4 +1,4 @@ -from swarms import Agent, OpenAIChat, ChromaDB +from swarms import Agent, ChromaDB, OpenAIChat # Making an instance of the ChromaDB class memory = ChromaDB( diff --git a/playground/demos/swarm_of_mma_manufacturing/main_example.py b/playground/demos/swarm_of_mma_manufacturing/main_example.py index 05b0e8e5..02a3cc1a 100644 --- a/playground/demos/swarm_of_mma_manufacturing/main_example.py +++ b/playground/demos/swarm_of_mma_manufacturing/main_example.py @@ -1,18 +1,19 @@ """ Swarm of multi modal autonomous agents for manufacturing! ---------------------------------------------------------- +--------------------------------------------------------- Health Security agent: Agent that monitors the health of working conditions: input image of factory output: health safety index 0.0 - 1.0 being the highest Quality Control agent: Agent that monitors the quality of the product: input image of product output: quality index 0.0 - 1.0 being the highest Productivity agent: Agent that monitors the productivity of the factory: input image of factory output: productivity index 0.0 - 1.0 being the highest Safety agent: Agent that monitors the safety of the factory: input image of factory output: safety index 0.0 - 1.0 being the highest Security agent: Agent that monitors the security of the factory: input image of factory output: security index 0.0 - 1.0 being the highest Sustainability agent: Agent that monitors the sustainability of the factory: input image of factory output: sustainability index 0.0 - 1.0 being the highest -Efficiency agent: Agent that monitors the efficiency of the factory: input image of factory output: efficiency index 0.0 - 1.0 being the highest +Efficiency agent: Agent that monitors the efficiency of the factory: input image of factory output: efficiency index 0.0 - 1.0 being the highest Agent: -health security agent -> quality control agent -> productivity agent -> safety agent -> security agent -> sustainability agent -> efficiency agent +health security agent -> quality control agent -> productivity agent -> safety agent -> security agent -> sustainability agent -> efficiency agent """ + import os from dotenv import load_dotenv diff --git a/playground/demos/urban_planning/urban_planning_example.py b/playground/demos/urban_planning/urban_planning_example.py index e85b4d31..2a52ced7 100644 --- a/playground/demos/urban_planning/urban_planning_example.py +++ b/playground/demos/urban_planning/urban_planning_example.py @@ -1,8 +1,10 @@ import os + from dotenv import load_dotenv -from swarms.models import OpenAIChat, GPT4VisionAPI -from swarms.structs import Agent, SequentialWorkflow + import swarms.prompts.urban_planning as upp +from swarms.models import GPT4VisionAPI, OpenAIChat +from swarms.structs import Agent, 
SequentialWorkflow # Load environment variables load_dotenv() diff --git a/playground/diy/hierchical_example.py b/playground/diy/hierchical_example.py index 0734c4f6..73b58f45 100644 --- a/playground/diy/hierchical_example.py +++ b/playground/diy/hierchical_example.py @@ -1,6 +1,5 @@ from swarms import HierarchicalSwarm - swarm = HierarchicalSwarm( openai_api_key="key", model_type="openai", diff --git a/playground/memory/qdrant/usage_example.py b/playground/memory/qdrant/usage_example.py index 2b7c4a8e..b85f3089 100644 --- a/playground/memory/qdrant/usage_example.py +++ b/playground/memory/qdrant/usage_example.py @@ -1,4 +1,5 @@ from langchain.document_loaders import CSVLoader + from swarms.memory import qdrant loader = CSVLoader( diff --git a/playground/models/anthropic_example.py b/playground/models/anthropic_example.py index 940892ca..0f966b6b 100644 --- a/playground/models/anthropic_example.py +++ b/playground/models/anthropic_example.py @@ -1,6 +1,5 @@ from swarms.models.anthropic import Anthropic - model = Anthropic(anthropic_api_key="") diff --git a/playground/models/bingchat_example.py b/playground/models/bingchat_example.py index 2af8472c..05e912c6 100644 --- a/playground/models/bingchat_example.py +++ b/playground/models/bingchat_example.py @@ -1,8 +1,9 @@ +import os + +from swarms.models import OpenAIChat from swarms.models.bing_chat import BingChat -from swarms.workers.worker import Worker from swarms.tools.autogpt import EdgeGPTTool, tool -from swarms.models import OpenAIChat -import os +from swarms.workers.worker import Worker api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/models/cohere_example.py b/playground/models/cohere_example.py index eb389db0..3a54956a 100644 --- a/playground/models/cohere_example.py +++ b/playground/models/cohere_example.py @@ -1,6 +1,5 @@ from swarms.models.cohere_chat import Cohere - cohere = Cohere(model="command-light", cohere_api_key="") out = cohere("Hello, how are you?") diff --git a/playground/models/dalle3_concurrent_example.py b/playground/models/dalle3_concurrent_example.py index de7f9cbb..e31f1cd8 100644 --- a/playground/models/dalle3_concurrent_example.py +++ b/playground/models/dalle3_concurrent_example.py @@ -1,12 +1,14 @@ """ -User task ->> GPT4 for prompt enrichment ->> Dalle3V for image generation -->> GPT4Vision for image captioning ->> Dalle3 better image +User task ->> GPT4 for prompt enrichment ->> Dalle3V for image generation +->> GPT4Vision for image captioning ->> Dalle3 better image """ -from swarms.models.dalle3 import Dalle3 + import os +from swarms.models.dalle3 import Dalle3 + api_key = os.environ["OPENAI_API_KEY"] dalle3 = Dalle3(openai_api_key=api_key, n=1) diff --git a/playground/models/distilled_whiserpx_example.py b/playground/models/distilled_whiserpx_example.py index 0742a1bc..1f6f0bc1 100644 --- a/playground/models/distilled_whiserpx_example.py +++ b/playground/models/distilled_whiserpx_example.py @@ -1,4 +1,5 @@ import asyncio + from swarms.models.distilled_whisperx import DistilWhisperModel model_wrapper = DistilWhisperModel() diff --git a/playground/models/gemini_example.py b/playground/models/gemini_example.py index 42fa4e74..75553bfc 100644 --- a/playground/models/gemini_example.py +++ b/playground/models/gemini_example.py @@ -1,5 +1,7 @@ import os + from dotenv import load_dotenv + from swarms.models.gemini import Gemini load_dotenv() diff --git a/playground/models/gpt4_v_example.py b/playground/models/gpt4_v_example.py index 822ec726..5c7a889e 100644 --- a/playground/models/gpt4_v_example.py 
+++ b/playground/models/gpt4_v_example.py @@ -1,6 +1,5 @@ from swarms.models.gpt4v import GPT4Vision - gpt4vision = GPT4Vision(openai_api_key="") img = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0d/VFPt_Solenoid_correct2.svg/640px-VFPt_Solenoid_correct2.svg.png" diff --git a/playground/models/miqu.py b/playground/models/miqu.py index f3fc1b4d..a4c9430a 100644 --- a/playground/models/miqu.py +++ b/playground/models/miqu.py @@ -1,6 +1,5 @@ from swarms import Mistral - # Initialize the model model = Mistral( model_name="miqudev/miqu-1-70b", diff --git a/playground/models/roboflow_example.py b/playground/models/roboflow_example.py index e7cae29e..56572fd5 100644 --- a/playground/models/roboflow_example.py +++ b/playground/models/roboflow_example.py @@ -1,6 +1,5 @@ from swarms import RoboflowMultiModal - # Initialize the model model = RoboflowMultiModal( api_key="api", diff --git a/playground/models/tts_speech_example.py b/playground/models/tts_speech_example.py index be38912c..6c33f944 100644 --- a/playground/models/tts_speech_example.py +++ b/playground/models/tts_speech_example.py @@ -1,7 +1,9 @@ -from swarms import OpenAITTS import os + from dotenv import load_dotenv +from swarms import OpenAITTS + load_dotenv() tts = OpenAITTS( diff --git a/playground/structs/agent_with_longterm.py b/playground/structs/agent_with_longterm.py index e803d095..588d6546 100644 --- a/playground/structs/agent_with_longterm.py +++ b/playground/structs/agent_with_longterm.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import Agent, OpenAIChat, ChromaDB +from swarms import Agent, ChromaDB, OpenAIChat # Load the environment variables load_dotenv() diff --git a/playground/structs/agent_with_tools_example.py b/playground/structs/agent_with_tools_example.py index 44dcd8f6..dc0dff4b 100644 --- a/playground/structs/agent_with_tools_example.py +++ b/playground/structs/agent_with_tools_example.py @@ -1,11 +1,11 @@ """ - - -tool decorated func [search_api] -> agent which parses the docs of the tool func + + +tool decorated func [search_api] -> agent which parses the docs of the tool func -> injected into prompt -> agent will output json containing tool usage -> agent output will be parsed -> tool executed -> terminal response can be returned to agent for self-healing - - + + """ import os @@ -13,7 +13,7 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import OpenAIChat, Agent +from swarms import Agent, OpenAIChat from swarms.tools.tool import tool # Load the environment variables diff --git a/playground/structs/autoscaler_example.py b/playground/structs/autoscaler_example.py index 8b808db6..aa7cf0c0 100644 --- a/playground/structs/autoscaler_example.py +++ b/playground/structs/autoscaler_example.py @@ -7,7 +7,6 @@ from swarms.models import OpenAIChat from swarms.structs import Agent from swarms.structs.autoscaler import AutoScaler - # Load the environment variables load_dotenv() diff --git a/playground/structs/concurrent_workflow_example.py b/playground/structs/concurrent_workflow_example.py index 98531388..8d8babde 100644 --- a/playground/structs/concurrent_workflow_example.py +++ b/playground/structs/concurrent_workflow_example.py @@ -1,6 +1,8 @@ import os + from dotenv import load_dotenv -from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent + +from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task # Load environment variables from .env file load_dotenv() 
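The `agent_with_tools_example.py` docstring above describes the tool loop: a `@tool`-decorated function's docs are parsed and injected into the prompt, the agent emits JSON describing a tool call, the call is executed, and the terminal output can be fed back for self-healing. A minimal, hypothetical sketch of that wiring (the `tools=` parameter and the `search_api` body are illustrative assumptions, not the library's confirmed API):

```python
import os

from dotenv import load_dotenv

from swarms import Agent, OpenAIChat
from swarms.tools.tool import tool

# Load the environment variables
load_dotenv()

llm = OpenAIChat(openai_api_key=os.environ.get("OPENAI_API_KEY"))


@tool
def search_api(query: str) -> str:
    """Search the web and return the top result for the query."""
    return f"Top result for '{query}'"  # placeholder; a real tool would call a search service


# Assumed wiring: the agent reads the tool's docstring and may emit JSON tool calls
agent = Agent(llm=llm, max_loops=1, tools=[search_api])
agent.run("Find recent papers on swarm robotics")
```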
diff --git a/playground/structs/debate_example.py b/playground/structs/debate_example.py index 5108d527..7cf0290b 100644 --- a/playground/structs/debate_example.py +++ b/playground/structs/debate_example.py @@ -5,10 +5,8 @@ import tenacity from langchain.chat_models import ChatOpenAI from langchain.output_parsers import RegexParser from langchain.prompts import PromptTemplate -from langchain.schema import ( - HumanMessage, - SystemMessage, -) +from langchain.schema import HumanMessage, SystemMessage + from swarms import Worker diff --git a/playground/structs/dialogue_simulator_example.py b/playground/structs/dialogue_simulator_example.py index ee9241b6..14c35b7e 100644 --- a/playground/structs/dialogue_simulator_example.py +++ b/playground/structs/dialogue_simulator_example.py @@ -1,6 +1,6 @@ +from swarms.models import OpenAIChat from swarms.swarms import DialogueSimulator from swarms.workers.worker import Worker -from swarms.models import OpenAIChat llm = OpenAIChat( model_name="gpt-4", openai_api_key="api-key", temperature=0.5 diff --git a/playground/structs/groupchat_example.py b/playground/structs/groupchat_example.py index b9ab5761..5c9d1a7c 100644 --- a/playground/structs/groupchat_example.py +++ b/playground/structs/groupchat_example.py @@ -1,6 +1,5 @@ -from swarms import OpenAI, Agent -from swarms.structs.groupchat import GroupChatManager, GroupChat - +from swarms import Agent, OpenAI +from swarms.structs.groupchat import GroupChat, GroupChatManager api_key = "" diff --git a/playground/structs/gui_app_example.py b/playground/structs/gui_app_example.py index 751cb03a..662f8a46 100644 --- a/playground/structs/gui_app_example.py +++ b/playground/structs/gui_app_example.py @@ -1,6 +1,5 @@ from swarms import HierarchicalSwarm - # Retrieve your API key from the environment or replace with your actual key api_key = "sksdsds" diff --git a/playground/structs/majority_voting.py b/playground/structs/majority_voting.py index 5eefb8ab..cd8de04a 100644 --- a/playground/structs/majority_voting.py +++ b/playground/structs/majority_voting.py @@ -1,4 +1,4 @@ -from swarms import Agent, OpenAIChat, MajorityVoting +from swarms import Agent, MajorityVoting, OpenAIChat # Initialize the llm llm = OpenAIChat() diff --git a/playground/structs/multi_agent_debate_example.py b/playground/structs/multi_agent_debate_example.py index 6124a21c..7a456fbc 100644 --- a/playground/structs/multi_agent_debate_example.py +++ b/playground/structs/multi_agent_debate_example.py @@ -1,9 +1,9 @@ +from swarms.models import OpenAIChat from swarms.swarms.multi_agent_debate import ( MultiAgentDebate, select_speaker, ) from swarms.workers.worker import Worker -from swarms.models import OpenAIChat llm = OpenAIChat() diff --git a/playground/structs/multi_modal_flow_example.py b/playground/structs/multi_modal_flow_example.py index b29c8bfd..ffc59367 100644 --- a/playground/structs/multi_modal_flow_example.py +++ b/playground/structs/multi_modal_flow_example.py @@ -1,5 +1,5 @@ # This might not work in the beginning but it's a starting point -from swarms.structs import Agent, GPT4V +from swarms.structs import GPT4V, Agent llm = GPT4V() diff --git a/playground/structs/multi_modal_rag_agent.py b/playground/structs/multi_modal_rag_agent.py index b7944638..ff758e28 100644 --- a/playground/structs/multi_modal_rag_agent.py +++ b/playground/structs/multi_modal_rag_agent.py @@ -1,10 +1,12 @@ # Importing necessary modules import os + from dotenv import load_dotenv + from swarms import Agent, OpenAIChat -from swarms.tools.tool import tool -from 
swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT from swarms.memory.chroma_db import ChromaDB +from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT +from swarms.tools.tool import tool # Loading environment variables from .env file load_dotenv() diff --git a/playground/structs/orchestrate_example.py b/playground/structs/orchestrate_example.py index b0e17588..6b91b74f 100644 --- a/playground/structs/orchestrate_example.py +++ b/playground/structs/orchestrate_example.py @@ -1,4 +1,4 @@ -from swarms import Worker, Orchestrator +from swarms import Orchestrator, Worker node = Worker( openai_api_key="", diff --git a/playground/structs/orchestrator_example.py b/playground/structs/orchestrator_example.py index b0e17588..6b91b74f 100644 --- a/playground/structs/orchestrator_example.py +++ b/playground/structs/orchestrator_example.py @@ -1,4 +1,4 @@ -from swarms import Worker, Orchestrator +from swarms import Orchestrator, Worker node = Worker( openai_api_key="", diff --git a/playground/structs/recursive_example.py b/playground/structs/recursive_example.py index 9760b606..cc3dcf0f 100644 --- a/playground/structs/recursive_example.py +++ b/playground/structs/recursive_example.py @@ -1,6 +1,8 @@ import os + from dotenv import load_dotenv -from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent + +from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task # Load environment variables from .env file load_dotenv() diff --git a/playground/structs/sequential_workflow_example.py b/playground/structs/sequential_workflow_example.py index 7fa110bc..f0ba0ee4 100644 --- a/playground/structs/sequential_workflow_example.py +++ b/playground/structs/sequential_workflow_example.py @@ -1,4 +1,4 @@ -from swarms import OpenAIChat, Agent, Task, SequentialWorkflow +from swarms import Agent, OpenAIChat, SequentialWorkflow, Task # Example usage llm = OpenAIChat( diff --git a/playground/structs/swarm_network_example.py b/playground/structs/swarm_network_example.py index de9c53b6..1675ca8b 100644 --- a/playground/structs/swarm_network_example.py +++ b/playground/structs/swarm_network_example.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import OpenAIChat, Agent, SwarmNetwork +from swarms import Agent, OpenAIChat, SwarmNetwork # Load the environment variables load_dotenv() diff --git a/playground/structs/todo_app_example.py b/playground/structs/todo_app_example.py index 627c72df..981bf499 100644 --- a/playground/structs/todo_app_example.py +++ b/playground/structs/todo_app_example.py @@ -1,6 +1,5 @@ from swarms import HierarchicalSwarm - # Retrieve your API key from the environment or replace with your actual key api_key = "sksdsds" diff --git a/playground/structs/workflow_example.py b/playground/structs/workflow_example.py index 91bff00a..0d9f18c4 100644 --- a/playground/structs/workflow_example.py +++ b/playground/structs/workflow_example.py @@ -1,6 +1,5 @@ -from swarms.structs.workflow import Workflow from swarms.models import OpenAIChat - +from swarms.structs.workflow import Workflow llm = OpenAIChat() diff --git a/playground/swarms_example.ipynb b/playground/swarms_example.ipynb index 2d7779b1..cdd2ebb7 100644 --- a/playground/swarms_example.ipynb +++ b/playground/swarms_example.ipynb @@ -1,111 +1,112 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "private_outputs": true, - "provenance": [], - "gpuType": "T4" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - 
"language_info": { - "name": "python" - }, - "accelerator": "GPU" + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "private_outputs": true, + "provenance": [], + "gpuType": "T4" }, - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "cs5RHepmhkEh" - }, - "outputs": [], - "source": [ - "!pip3 install swarms" - ] - }, - { - "cell_type": "markdown", - "source": [ - "Copied from the repo, example.py\n", - "Enter your OpenAI API key here." - ], - "metadata": { - "id": "-d9k3egzgp2_" - } - }, - { - "cell_type": "code", - "source": [ - "from swarms.models import OpenAIChat\n", - "from swarms.structs import Agent\n", - "\n", - "api_key = \"\"\n", - "\n", - "# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC\n", - "llm = OpenAIChat(\n", - " # model_name=\"gpt-4\"\n", - " openai_api_key=api_key,\n", - " temperature=0.5,\n", - " # max_tokens=100,\n", - ")\n", - "\n", - "\n", - "## Initialize the workflow\n", - "agent = Agent(\n", - " llm=llm,\n", - " max_loops=5,\n", - " dashboard=True,\n", - " # tools = [search_api, slack, ]\n", - " # stopping_condition=None, # You can define a stopping condition as needed.\n", - " # loop_interval=1,\n", - " # retry_attempts=3,\n", - " # retry_interval=1,\n", - " # interactive=False, # Set to 'True' for interactive mode.\n", - " # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling.\n", - ")\n", - "\n", - "# out = agent.load_state(\"flow_state.json\")\n", - "# temp = agent.dynamic_temperature()\n", - "# filter = agent.add_response_filter(\"Trump\")\n", - "out = agent.run(\n", - " \"Generate a 10,000 word blog on mental clarity and the benefits of meditation.\"\n", - ")\n", - "# out = agent.validate_response(out)\n", - "# out = agent.analyze_feedback(out)\n", - "# out = agent.print_history_and_memory()\n", - "# # out = agent.save_state(\"flow_state.json\")\n", - "# print(out)" - ], - "metadata": { - "id": "K1Sbq4UkgVjk" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "source": [ - "Look at the log, which may be empty." - ], - "metadata": { - "id": "6VtgQ0F4BNc-" - } - }, - { - "cell_type": "code", - "source": [ - "!cat errors.txt" - ], - "metadata": { - "id": "RqL5LL3xBLWR" - }, - "execution_count": null, - "outputs": [] - } - ] + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cs5RHepmhkEh" + }, + "outputs": [], + "source": [ + "!pip3 install swarms" + ] + }, + { + "cell_type": "markdown", + "source": [ + "Copied from the repo, example.py\n", + "Enter your OpenAI API key here." 
+ ], + "metadata": { + "id": "-d9k3egzgp2_" + } + }, + { + "cell_type": "code", + "source": [ + "from swarms.models import OpenAIChat\n", + "from swarms.structs import Agent\n", + "\n", + "api_key = \"\"\n", + "\n", + "# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC\n", + "llm = OpenAIChat(\n", + " # model_name=\"gpt-4\"\n", + " openai_api_key=api_key,\n", + " temperature=0.5,\n", + " # max_tokens=100,\n", + ")\n", + "\n", + "\n", + "## Initialize the workflow\n", + "agent = Agent(\n", + " llm=llm,\n", + " max_loops=5,\n", + " dashboard=True,\n", + " # tools = [search_api, slack, ]\n", + " # stopping_condition=None, # You can define a stopping condition as needed.\n", + " # loop_interval=1,\n", + " # retry_attempts=3,\n", + " # retry_interval=1,\n", + " # interactive=False, # Set to 'True' for interactive mode.\n", + " # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling.\n", + ")\n", + "\n", + "# out = agent.load_state(\"flow_state.json\")\n", + "# temp = agent.dynamic_temperature()\n", + "# filter = agent.add_response_filter(\"Trump\")\n", + "out = agent.run(\n", + " \"Generate a 10,000 word blog on mental clarity and the benefits\"\n", + " \" of meditation.\"\n", + ")\n", + "# out = agent.validate_response(out)\n", + "# out = agent.analyze_feedback(out)\n", + "# out = agent.print_history_and_memory()\n", + "# # out = agent.save_state(\"flow_state.json\")\n", + "# print(out)" + ], + "metadata": { + "id": "K1Sbq4UkgVjk" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Look at the log, which may be empty." + ], + "metadata": { + "id": "6VtgQ0F4BNc-" + } + }, + { + "cell_type": "code", + "source": [ + "!cat errors.txt" + ], + "metadata": { + "id": "RqL5LL3xBLWR" + }, + "execution_count": null, + "outputs": [] + } + ] } \ No newline at end of file diff --git a/playground/tools/agent_with_tools_example.py b/playground/tools/agent_with_tools_example.py index 3bad0b1d..0d736a16 100644 --- a/playground/tools/agent_with_tools_example.py +++ b/playground/tools/agent_with_tools_example.py @@ -1,7 +1,9 @@ import os + +from dotenv import load_dotenv + from swarms.models import OpenAIChat from swarms.structs import Agent -from dotenv import load_dotenv load_dotenv() diff --git a/scripts/auto_tests_docs/auto_docs.py b/scripts/auto_tests_docs/auto_docs.py index db1f2caf..570793c8 100644 --- a/scripts/auto_tests_docs/auto_docs.py +++ b/scripts/auto_tests_docs/auto_docs.py @@ -7,15 +7,12 @@ from dotenv import load_dotenv from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP from swarms import OpenAIChat - -########## - -from swarms.structs.tool_json_schema import JSON -from swarms.structs.majority_voting import ( - MajorityVoting, -) +from swarms.structs.majority_voting import MajorityVoting from swarms.structs.stackoverflow_swarm import StackOverflowSwarm from swarms.structs.task_queue_base import TaskQueueBase +from swarms.structs.tool_json_schema import JSON + +########## #################### diff --git a/scripts/auto_tests_docs/auto_docs_omni.py b/scripts/auto_tests_docs/auto_docs_omni.py index 3ae647a7..7fd3cde6 100644 --- a/scripts/auto_tests_docs/auto_docs_omni.py +++ b/scripts/auto_tests_docs/auto_docs_omni.py @@ -3,10 +3,10 @@ import os import threading from dotenv import load_dotenv + from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP from swarms import OpenAIChat - ########### diff --git 
a/scripts/auto_tests_docs/auto_tests.py b/scripts/auto_tests_docs/auto_tests.py index 96ab8b8b..c9d7c95e 100644 --- a/scripts/auto_tests_docs/auto_tests.py +++ b/scripts/auto_tests_docs/auto_tests.py @@ -2,17 +2,18 @@ import inspect import os import re import threading -from swarms import OpenAIChat + +######## +from dotenv import load_dotenv + from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT +from swarms import OpenAIChat ######### from swarms.memory.dict_internal_memory import DictInternalMemory from swarms.memory.dict_shared_memory import DictSharedMemory from swarms.memory.lanchain_chroma import LangchainChromaVectorMemory -######## -from dotenv import load_dotenv - load_dotenv() api_key = os.getenv("OPENAI_API_KEY") diff --git a/scripts/auto_tests_docs/update_mkdocs.py b/scripts/auto_tests_docs/update_mkdocs.py index dfde53cb..d169a15f 100644 --- a/scripts/auto_tests_docs/update_mkdocs.py +++ b/scripts/auto_tests_docs/update_mkdocs.py @@ -14,7 +14,7 @@ def update_mkdocs( - base_path: The base path where documentation Markdown files are stored. - mkdocs_file: The path to the mkdocs.yml file. """ - with open(mkdocs_file, "r") as file: + with open(mkdocs_file) as file: mkdocs_config = yaml.safe_load(file) # Find or create the 'zeta.nn.modules' section in 'nav' diff --git a/scripts/get_package_requirements.py b/scripts/get_package_requirements.py index 9494409b..99e139da 100644 --- a/scripts/get_package_requirements.py +++ b/scripts/get_package_requirements.py @@ -3,7 +3,7 @@ import pkg_resources def get_package_versions(requirements_path, output_path): try: - with open(requirements_path, "r") as file: + with open(requirements_path) as file: requirements = file.readlines() except FileNotFoundError: print(f"Error: The file '{requirements_path}' was not found.") diff --git a/scripts/requirementstxt_to_pyproject.py b/scripts/requirementstxt_to_pyproject.py index 5710db61..811ac7be 100644 --- a/scripts/requirementstxt_to_pyproject.py +++ b/scripts/requirementstxt_to_pyproject.py @@ -1,10 +1,10 @@ -import toml import pkg_resources +import toml def update_pyproject_versions(pyproject_path): try: - with open(pyproject_path, "r") as file: + with open(pyproject_path) as file: data = toml.load(file) except FileNotFoundError: print(f"Error: The file '{pyproject_path}' was not found.") diff --git a/swarms/__init__.py b/swarms/__init__.py index bece68ef..66063891 100644 --- a/swarms/__init__.py +++ b/swarms/__init__.py @@ -5,17 +5,13 @@ bootup() from swarms.agents import * # noqa: E402, F403 -from swarms.structs import * # noqa: E402, F403 -from swarms.models import * # noqa: E402, F403 -from swarms.telemetry import * # noqa: E402, F403 -from swarms.utils import * # noqa: E402, F403 -from swarms.prompts import * # noqa: E402, F403 -from swarms.tokenizers import * # noqa: E402, F403 -from swarms.loaders import * # noqa: E402, F403 from swarms.artifacts import * # noqa: E402, F403 from swarms.chunkers import * # noqa: E402, F403 -from swarms.structs import * # noqa: E402, F403 -from swarms.agents import * # noqa: E402, F403 +from swarms.loaders import * # noqa: E402, F403 from swarms.models import * # noqa: E402, F403 -from swarms.tools import * # noqa: E402, F403 +from swarms.prompts import * # noqa: E402, F403 +from swarms.structs import * # noqa: E402, F403 from swarms.telemetry import * # noqa: E402, F403 +from swarms.tokenizers import * # noqa: E402, F403 +from swarms.tools import * # noqa: E402, F403 +from swarms.utils import * # noqa: E402, F403 diff --git 
a/swarms/agents/__init__.py b/swarms/agents/__init__.py index 461baa16..b213748e 100644 --- a/swarms/agents/__init__.py +++ b/swarms/agents/__init__.py @@ -1,3 +1,4 @@ +from swarms.agents.agent_wrapper import agent_wrapper from swarms.agents.base import AbstractAgent from swarms.agents.omni_modal_agent import OmniModalAgent from swarms.agents.simple_agent import SimpleAgent @@ -15,7 +16,6 @@ from swarms.agents.stopping_conditions import ( ) from swarms.agents.tool_agent import ToolAgent from swarms.agents.worker_agent import Worker -from swarms.agents.agent_wrapper import agent_wrapper __all__ = [ "AbstractAgent", diff --git a/swarms/agents/base.py b/swarms/agents/base.py index 22c0addc..08cf07bf 100644 --- a/swarms/agents/base.py +++ b/swarms/agents/base.py @@ -37,7 +37,6 @@ class AbstractAgent: def memory(self, memory_store): """init memory""" - pass def reset(self): """(Abstract method) Reset the agent.""" diff --git a/swarms/agents/developer_agents.py b/swarms/agents/developer_agents.py index 95b68683..4392af03 100644 --- a/swarms/agents/developer_agents.py +++ b/swarms/agents/developer_agents.py @@ -1,6 +1,6 @@ -from swarms.structs.agent import Agent -from swarms.prompts.tests import TEST_WRITER_SOP_PROMPT from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP +from swarms.prompts.tests import TEST_WRITER_SOP_PROMPT +from swarms.structs.agent import Agent class UnitTesterAgent: diff --git a/swarms/agents/omni_modal_agent.py b/swarms/agents/omni_modal_agent.py index 113ec461..8f2dabc5 100644 --- a/swarms/agents/omni_modal_agent.py +++ b/swarms/agents/omni_modal_agent.py @@ -142,5 +142,4 @@ class OmniModalAgent: print(token) """ - for token in response.split(): - yield token + yield from response.split() diff --git a/swarms/agents/simple_agent.py b/swarms/agents/simple_agent.py index 757715dd..ba67933c 100644 --- a/swarms/agents/simple_agent.py +++ b/swarms/agents/simple_agent.py @@ -1,9 +1,10 @@ -from swarms.structs.conversation import Conversation -from swarms.models.base_llm import AbstractLLM -from typing import Any import importlib import pkgutil +from typing import Any + import swarms.models +from swarms.models.base_llm import AbstractLLM +from swarms.structs.conversation import Conversation def get_llm_by_name(name: str): diff --git a/swarms/artifacts/base_artifact.py b/swarms/artifacts/base_artifact.py index 3edf7ad3..aad07a7b 100644 --- a/swarms/artifacts/base_artifact.py +++ b/swarms/artifacts/base_artifact.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import Any + import json import uuid from abc import ABC, abstractmethod from dataclasses import dataclass +from typing import Any @dataclass diff --git a/swarms/artifacts/text_artifact.py b/swarms/artifacts/text_artifact.py index e800ad51..5fdfe4fa 100644 --- a/swarms/artifacts/text_artifact.py +++ b/swarms/artifacts/text_artifact.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Optional + from dataclasses import dataclass, field + from swarms.artifacts.base_artifact import BaseArtifact from swarms.tokenizers.base_tokenizer import BaseTokenizer @@ -33,7 +34,7 @@ class TextArtifact(BaseArtifact): _embedding: list[float] = field(default_factory=list) @property - def embedding(self) -> Optional[list[float]]: + def embedding(self) -> list[float] | None: return None if len(self._embedding) == 0 else self._embedding def __add__(self, other: BaseArtifact) -> TextArtifact: @@ -42,7 +43,7 @@ class TextArtifact(BaseArtifact): def __bool__(self) -> bool: return 
bool(self.value.strip()) - def generate_embedding(self, model) -> Optional[list[float]]: + def generate_embedding(self, model) -> list[float] | None: self._embedding.clear() self._embedding.extend(model.embed_string(str(self.value))) diff --git a/swarms/chunkers/__init__.py b/swarms/chunkers/__init__.py index ccc8814e..b55d15c2 100644 --- a/swarms/chunkers/__init__.py +++ b/swarms/chunkers/__init__.py @@ -1,5 +1,5 @@ -from swarms.chunkers.chunk_seperator import ChunkSeparator from swarms.chunkers.base_chunker import BaseChunker +from swarms.chunkers.chunk_seperator import ChunkSeparator from swarms.chunkers.text_chunker import TextChunker __all__ = ["ChunkSeparator", "BaseChunker", "TextChunker"] diff --git a/swarms/chunkers/base_chunker.py b/swarms/chunkers/base_chunker.py index a63b3ac7..47f73a4e 100644 --- a/swarms/chunkers/base_chunker.py +++ b/swarms/chunkers/base_chunker.py @@ -1,7 +1,8 @@ from __future__ import annotations + from abc import ABC -from typing import Optional from dataclasses import dataclass, field + from swarms.artifacts.text_artifact import TextArtifact from swarms.chunkers.chunk_seperator import ChunkSeparator from swarms.tokenizers.base_tokenizer import BaseTokenizer @@ -47,7 +48,7 @@ class BaseChunker(ABC): def _chunk_recursively( self, chunk: str, - current_separator: Optional[ChunkSeparator] = None, + current_separator: ChunkSeparator | None = None, ) -> list[str]: """ Recursively chunk the given chunk into smaller subchunks. diff --git a/swarms/cli/_cli.py b/swarms/cli/_cli.py index 9b1365ae..831f1718 100644 --- a/swarms/cli/_cli.py +++ b/swarms/cli/_cli.py @@ -1,4 +1,5 @@ import argparse + from swarms.agents.simple_agent import SimpleAgent, get_llm_by_name diff --git a/swarms/cli/run_file.py b/swarms/cli/run_file.py index 171c6c56..60b621a3 100644 --- a/swarms/cli/run_file.py +++ b/swarms/cli/run_file.py @@ -1,5 +1,5 @@ -import sys import subprocess +import sys def run_file(filename: str): diff --git a/swarms/loaders/pdf_loader.py b/swarms/loaders/pdf_loader.py index 17e0b465..34085efb 100644 --- a/swarms/loaders/pdf_loader.py +++ b/swarms/loaders/pdf_loader.py @@ -2,7 +2,7 @@ from __future__ import annotations from dataclasses import dataclass from pathlib import Path -from typing import IO, Dict, List, Optional +from typing import IO from pypdf import PdfReader @@ -50,27 +50,27 @@ class PDFLoader: def load( self, source: str | IO | Path, - password: Optional[str] = None, + password: str | None = None, *args, **kwargs, - ) -> List[TextArtifact]: + ) -> list[TextArtifact]: return self._load_pdf(source, password) def load_collection( self, - sources: List[str | IO | Path], - password: Optional[str] = None, + sources: list[str | IO | Path], + password: str | None = None, *args, **kwargs, - ) -> Dict[str, List[TextArtifact]]: + ) -> dict[str, list[TextArtifact]]: return { str_to_hash(str(s)): self._load_pdf(s, password) for s in sources } def _load_pdf( - self, stream: str | IO | Path, password: Optional[str] - ) -> List[TextArtifact]: + self, stream: str | IO | Path, password: str | None + ) -> list[TextArtifact]: reader = PdfReader(stream, strict=True, password=password) return [ TextArtifact(text=p.extract_text()) for p in reader.pages diff --git a/swarms/memory/__init__.py b/swarms/memory/__init__.py index 89c2fddf..72318d28 100644 --- a/swarms/memory/__init__.py +++ b/swarms/memory/__init__.py @@ -1,14 +1,14 @@ -from swarms.memory.base_vectordb import AbstractVectorDatabase -from swarms.memory.base_db import AbstractDatabase -from 
swarms.memory.short_term_memory import ShortTermMemory -from swarms.memory.sqlite import SQLiteDB -from swarms.memory.weaviate_db import WeaviateDB -from swarms.memory.visual_memory import VisualShortTermMemory from swarms.memory.action_subtask import ActionSubtaskEntry +from swarms.memory.base_db import AbstractDatabase +from swarms.memory.base_vectordb import AbstractVectorDatabase from swarms.memory.chroma_db import ChromaDB from swarms.memory.dict_internal_memory import DictInternalMemory from swarms.memory.dict_shared_memory import DictSharedMemory from swarms.memory.lanchain_chroma import LangchainChromaVectorMemory +from swarms.memory.short_term_memory import ShortTermMemory +from swarms.memory.sqlite import SQLiteDB +from swarms.memory.visual_memory import VisualShortTermMemory +from swarms.memory.weaviate_db import WeaviateDB __all__ = [ "AbstractVectorDatabase", diff --git a/swarms/memory/base_db.py b/swarms/memory/base_db.py index 0501def7..eb3e6f00 100644 --- a/swarms/memory/base_db.py +++ b/swarms/memory/base_db.py @@ -21,8 +21,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def close(self): """ @@ -32,8 +30,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def execute_query(self, query): """ @@ -46,8 +42,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def fetch_all(self): """ @@ -60,8 +54,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def fetch_one(self): """ @@ -74,8 +66,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def add(self, table, data): """ @@ -89,8 +79,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def query(self, table, condition): """ @@ -107,8 +95,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def get(self, table, id): """ @@ -125,8 +111,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def update(self, table, id, data): """ @@ -141,8 +125,6 @@ class AbstractDatabase(ABC): """ - pass - @abstractmethod def delete(self, table, id): """ @@ -155,5 +137,3 @@ class AbstractDatabase(ABC): id (int): The ID of the record to be deleted. """ - - pass diff --git a/swarms/memory/base_vectordb.py b/swarms/memory/base_vectordb.py index 06f42007..6b34d244 100644 --- a/swarms/memory/base_vectordb.py +++ b/swarms/memory/base_vectordb.py @@ -21,8 +21,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def close(self): """ @@ -32,8 +30,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def query(self, query: str): """ @@ -46,8 +42,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def fetch_all(self): """ @@ -60,8 +54,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def fetch_one(self): """ @@ -74,8 +66,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def add(self, doc: str): """ @@ -89,8 +79,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def get(self, query: str): """ @@ -107,8 +95,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def update(self, doc): """ @@ -123,8 +109,6 @@ class AbstractVectorDatabase(ABC): """ - pass - @abstractmethod def delete(self, message): """ @@ -137,5 +121,3 @@ class AbstractVectorDatabase(ABC): id (int): The ID of the record to be deleted. 
""" - - pass diff --git a/swarms/memory/chroma_db.py b/swarms/memory/chroma_db.py index b2f1453f..dcb0e19f 100644 --- a/swarms/memory/chroma_db.py +++ b/swarms/memory/chroma_db.py @@ -1,16 +1,15 @@ -import os -import numpy as np import logging +import os import uuid -from typing import Optional, Callable, List +from typing import Callable, List, Optional import chromadb +import numpy as np from dotenv import load_dotenv from swarms.utils.data_to_text import data_to_text from swarms.utils.markdown_message import display_markdown_message - # Load environment variables load_dotenv() diff --git a/swarms/memory/cosine_similarity.py b/swarms/memory/cosine_similarity.py index 6e7b1df3..94c5e585 100644 --- a/swarms/memory/cosine_similarity.py +++ b/swarms/memory/cosine_similarity.py @@ -1,4 +1,5 @@ """Math utils.""" + import logging from typing import List, Optional, Tuple, Union diff --git a/swarms/memory/dict_shared_memory.py b/swarms/memory/dict_shared_memory.py index a8b78be7..f81e2fd4 100644 --- a/swarms/memory/dict_shared_memory.py +++ b/swarms/memory/dict_shared_memory.py @@ -4,7 +4,7 @@ import os import threading import uuid from pathlib import Path -from typing import Dict, Any +from typing import Any, Dict class DictSharedMemory: @@ -61,7 +61,7 @@ class DictSharedMemory: def get_top_n(self, n: int) -> None: """Get the top n entries from the internal memory.""" with self.lock: - with open(self.file_loc, "r") as f: + with open(self.file_loc) as f: try: file_data = json.load(f) except Exception as e: @@ -81,7 +81,7 @@ class DictSharedMemory: def write_to_file(self, data: Dict[str, Dict[str, Any]]) -> bool: """Write the internal memory to a file.""" if self.file_loc is not None: - with open(self.file_loc, "r") as f: + with open(self.file_loc) as f: try: file_data = json.load(f) except Exception as e: diff --git a/swarms/memory/lanchain_chroma.py b/swarms/memory/lanchain_chroma.py index e830ec4d..95a2e9e3 100644 --- a/swarms/memory/lanchain_chroma.py +++ b/swarms/memory/lanchain_chroma.py @@ -3,11 +3,12 @@ from pathlib import Path from langchain.chains import RetrievalQA from langchain.chains.question_answering import load_qa_chain -from swarms.models.openai_models import OpenAIChat from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import Chroma +from swarms.models.openai_models import OpenAIChat + def synchronized_mem(method): """ diff --git a/swarms/memory/pg.py b/swarms/memory/pg.py index d96b475d..b04beacf 100644 --- a/swarms/memory/pg.py +++ b/swarms/memory/pg.py @@ -1,5 +1,6 @@ import uuid from typing import Any, List, Optional + from sqlalchemy import JSON, Column, String, create_engine from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.ext.declarative import declarative_base diff --git a/swarms/memory/pinecone.py b/swarms/memory/pinecone.py index b1c0edf3..d33cb9cd 100644 --- a/swarms/memory/pinecone.py +++ b/swarms/memory/pinecone.py @@ -1,7 +1,9 @@ from typing import Optional -from swarms.memory.base_vectordb import AbstractVectorDatabase + import pinecone from attr import define, field + +from swarms.memory.base_vectordb import AbstractVectorDatabase from swarms.utils.hash import str_to_hash diff --git a/swarms/memory/short_term_memory.py b/swarms/memory/short_term_memory.py index d380fba5..34e32d5d 100644 --- a/swarms/memory/short_term_memory.py +++ b/swarms/memory/short_term_memory.py @@ -1,7 +1,8 @@ +import json import logging -from swarms.structs.base import BaseStructure 
import threading -import json + +from swarms.structs.base import BaseStructure class ShortTermMemory(BaseStructure): @@ -181,7 +182,7 @@ class ShortTermMemory(BaseStructure): """ try: with self.lock: - with open(filename, "r") as f: + with open(filename) as f: data = json.load(f) self.short_term_memory = data.get( "short_term_memory", [] diff --git a/swarms/memory/sqlite.py b/swarms/memory/sqlite.py index 542be34b..7a391303 100644 --- a/swarms/memory/sqlite.py +++ b/swarms/memory/sqlite.py @@ -1,4 +1,5 @@ -from typing import List, Tuple, Any, Optional +from typing import Any, List, Optional, Tuple + from swarms.memory.base_vectordb import AbstractVectorDatabase try: diff --git a/swarms/memory/utils.py b/swarms/memory/utils.py index 42801237..4dbbff80 100644 --- a/swarms/memory/utils.py +++ b/swarms/memory/utils.py @@ -5,8 +5,8 @@ from typing import List, Tuple, Type import numpy as np -from swarms.structs.document import Document from swarms.memory.cosine_similarity import cosine_similarity +from swarms.structs.document import Document class DistanceStrategy(str, Enum): diff --git a/swarms/memory/visual_memory.py b/swarms/memory/visual_memory.py index 46a59509..1361d6ec 100644 --- a/swarms/memory/visual_memory.py +++ b/swarms/memory/visual_memory.py @@ -1,5 +1,5 @@ -from typing import List from datetime import datetime +from typing import List class VisualShortTermMemory: diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py index 00d9d1f2..0826e245 100644 --- a/swarms/models/__init__.py +++ b/swarms/models/__init__.py @@ -1,23 +1,50 @@ ############################################ LLMs -from swarms.models.base_llm import AbstractLLM # noqa: E402 from swarms.models.anthropic import Anthropic # noqa: E402 -from swarms.models.petals import Petals # noqa: E402 + +# 3############ Embedding models +from swarms.models.base_embedding_model import BaseEmbeddingModel +from swarms.models.base_llm import AbstractLLM # noqa: E402 + +################# MultiModal Models +from swarms.models.base_multimodal_model import ( + BaseMultiModalModel, +) # noqa: E402 +from swarms.models.biogpt import BioGPT # noqa: E402 +from swarms.models.clipq import CLIPQ # noqa: E402 +from swarms.models.fuyu import Fuyu # noqa: E402 +from swarms.models.gemini import Gemini # noqa: E402 +from swarms.models.gigabind import Gigabind # noqa: E402 +from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402 +from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 +from swarms.models.idefics import Idefics # noqa: E402 +from swarms.models.kosmos_two import Kosmos # noqa: E402 +from swarms.models.layoutlm_document_qa import ( + LayoutLMDocumentQA, +) # noqa: E402 + +# from swarms.models.vip_llava import VipLlavaMultiModal # noqa: E402 +from swarms.models.llava import LavaMultiModal # noqa: E402 from swarms.models.mistral import Mistral # noqa: E402 +from swarms.models.mixtral import Mixtral # noqa: E402 +from swarms.models.mpt import MPT7B # noqa: E402 +from swarms.models.nougat import Nougat # noqa: E402 from swarms.models.openai_models import ( - OpenAI, AzureOpenAI, + OpenAI, OpenAIChat, ) # noqa: E402 +from swarms.models.openai_tts import OpenAITTS # noqa: E402 +from swarms.models.petals import Petals # noqa: E402 +from swarms.models.qwen import QwenVLMultiModal # noqa: E402 +from swarms.models.roboflow_model import RoboflowMultiModal +from swarms.models.sam_supervision import SegmentAnythingMarkGenerator -# from swarms.models.vllm import vLLM # noqa: E402 -from swarms.models.zephyr import Zephyr 
# noqa: E402 -from swarms.models.biogpt import BioGPT # noqa: E402 -from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 -from swarms.models.wizard_storytelling import ( - WizardLLMStoryTeller, +##### Utils +from swarms.models.sampling_params import ( + SamplingParams, + SamplingType, ) # noqa: E402 -from swarms.models.mpt import MPT7B # noqa: E402 -from swarms.models.mixtral import Mixtral # noqa: E402 +from swarms.models.timm import TimmModel # noqa: E402 # from swarms.models.modelscope_pipeline import ModelScopePipeline # from swarms.models.modelscope_llm import ( @@ -25,34 +52,25 @@ from swarms.models.mixtral import Mixtral # noqa: E402 # ) # noqa: E402 from swarms.models.together import TogetherLLM # noqa: E402 -################# MultiModal Models -from swarms.models.base_multimodal_model import ( - BaseMultiModalModel, -) # noqa: E402 -from swarms.models.idefics import Idefics # noqa: E402 -from swarms.models.vilt import Vilt # noqa: E402 -from swarms.models.nougat import Nougat # noqa: E402 -from swarms.models.layoutlm_document_qa import ( - LayoutLMDocumentQA, -) # noqa: E402 -from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402 -from swarms.models.openai_tts import OpenAITTS # noqa: E402 -from swarms.models.gemini import Gemini # noqa: E402 -from swarms.models.gigabind import Gigabind # noqa: E402 -from swarms.models.zeroscope import ZeroscopeTTV # noqa: E402 -from swarms.models.timm import TimmModel # noqa: E402 +############## Types +from swarms.models.types import ( # noqa: E402 + AudioModality, + ImageModality, + MultimodalData, + TextModality, + VideoModality, +) from swarms.models.ultralytics_model import ( UltralyticsModel, ) # noqa: E402 +from swarms.models.vilt import Vilt # noqa: E402 +from swarms.models.wizard_storytelling import ( + WizardLLMStoryTeller, +) # noqa: E402 -# from swarms.models.vip_llava import VipLlavaMultiModal # noqa: E402 -from swarms.models.llava import LavaMultiModal # noqa: E402 -from swarms.models.qwen import QwenVLMultiModal # noqa: E402 -from swarms.models.clipq import CLIPQ # noqa: E402 -from swarms.models.kosmos_two import Kosmos # noqa: E402 -from swarms.models.fuyu import Fuyu # noqa: E402 -from swarms.models.roboflow_model import RoboflowMultiModal -from swarms.models.sam_supervision import SegmentAnythingMarkGenerator +# from swarms.models.vllm import vLLM # noqa: E402 +from swarms.models.zephyr import Zephyr # noqa: E402 +from swarms.models.zeroscope import ZeroscopeTTV # noqa: E402 # from swarms.models.dalle3 import Dalle3 # from swarms.models.distilled_whisperx import DistilWhisperModel # noqa: E402 @@ -64,25 +82,6 @@ from swarms.models.sam_supervision import SegmentAnythingMarkGenerator ################# Tokenizers -############## Types -from swarms.models.types import ( - TextModality, - ImageModality, - AudioModality, - VideoModality, - MultimodalData, -) # noqa: E402 - -# 3############ Embedding models -from swarms.models.base_embedding_model import BaseEmbeddingModel - - -##### Utils -from swarms.models.sampling_params import ( - SamplingType, - SamplingParams, -) # noqa: E402 - __all__ = [ "AbstractLLM", "Anthropic", diff --git a/swarms/models/anthropic.py b/swarms/models/anthropic.py index 0e4690f9..5292f202 100644 --- a/swarms/models/anthropic.py +++ b/swarms/models/anthropic.py @@ -24,14 +24,12 @@ from langchain.callbacks.manager import ( CallbackManagerForLLMRun, ) from langchain.llms.base import LLM -from pydantic import Field, SecretStr, root_validator from langchain.schema.language_model import 
BaseLanguageModel from langchain.schema.output import GenerationChunk from langchain.schema.prompt import PromptValue -from langchain.utils import ( - get_from_dict_or_env, -) +from langchain.utils import get_from_dict_or_env from packaging.version import parse +from pydantic import Field, SecretStr, root_validator from requests import HTTPError, Response diff --git a/swarms/models/base_embedding_model.py b/swarms/models/base_embedding_model.py index 218e52aa..4cdead11 100644 --- a/swarms/models/base_embedding_model.py +++ b/swarms/models/base_embedding_model.py @@ -2,15 +2,14 @@ from __future__ import annotations from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import Optional import numpy as np -from swarms.chunkers.base_chunker import BaseChunker -from swarms.chunkers.text_chunker import TextChunker -from swarms.utils.exponential_backoff import ExponentialBackoffMixin from swarms.artifacts.text_artifact import TextArtifact +from swarms.chunkers.base_chunker import BaseChunker +from swarms.chunkers.text_chunker import TextChunker from swarms.tokenizers.base_tokenizer import BaseTokenizer +from swarms.utils.exponential_backoff import ExponentialBackoffMixin @dataclass @@ -26,7 +25,7 @@ class BaseEmbeddingModel( """ model: str = None - tokenizer: Optional[BaseTokenizer] = None + tokenizer: BaseTokenizer | None = None chunker: BaseChunker = field(init=False) def __post_init__(self) -> None: diff --git a/swarms/models/base_llm.py b/swarms/models/base_llm.py index bc1f67c7..d69f21a8 100644 --- a/swarms/models/base_llm.py +++ b/swarms/models/base_llm.py @@ -123,7 +123,6 @@ class AbstractLLM(ABC): @metrics_decorator def run(self, task: Optional[str] = None, *args, **kwargs) -> str: """generate text using language model""" - pass async def arun(self, task: Optional[str] = None, *args, **kwargs): """Asynchronous run @@ -190,7 +189,6 @@ class AbstractLLM(ABC): def generate_summary(self, text: str) -> str: """Generate Summary""" - pass def set_temperature(self, value: float): """Set Temperature""" diff --git a/swarms/models/base_multimodal_model.py b/swarms/models/base_multimodal_model.py index c4a5890a..25975eaa 100644 --- a/swarms/models/base_multimodal_model.py +++ b/swarms/models/base_multimodal_model.py @@ -105,7 +105,6 @@ class BaseMultiModalModel: **kwargs, ): """Run the model""" - pass def __call__( self, @@ -127,7 +126,6 @@ class BaseMultiModalModel: async def arun(self, task: str, img: str, *args, **kwargs): """Run the model asynchronously""" - pass def get_img_from_web(self, img: str, *args, **kwargs): """Get the image from the web""" @@ -294,7 +292,6 @@ class BaseMultiModalModel: @abstractmethod def generate_summary(self, text: str) -> str: """Generate Summary""" - pass def set_temperature(self, value: float): """Set Temperature""" diff --git a/swarms/models/base_tts.py b/swarms/models/base_tts.py index 60896856..402b8501 100644 --- a/swarms/models/base_tts.py +++ b/swarms/models/base_tts.py @@ -1,7 +1,8 @@ import wave +from abc import abstractmethod from typing import Optional + from swarms.models.base_llm import AbstractLLM -from abc import abstractmethod class BaseTTSModel(AbstractLLM): @@ -47,7 +48,6 @@ class BaseTTSModel(AbstractLLM): Args: filepath (Optional[str], optional): _description_. Defaults to None. """ - pass def load(self, filepath: Optional[str] = None): """Load the model from a file. @@ -55,7 +55,6 @@ class BaseTTSModel(AbstractLLM): Args: filepath (Optional[str], optional): _description_. Defaults to None. 
""" - pass @abstractmethod def run(self, task: str, *args, **kwargs): @@ -64,7 +63,6 @@ class BaseTTSModel(AbstractLLM): Args: task (str): _description_ """ - pass def __call__(self, task: str, *args, **kwargs): """Call the model on the given task. diff --git a/swarms/models/base_ttv.py b/swarms/models/base_ttv.py index 6ef959e8..ee795c26 100644 --- a/swarms/models/base_ttv.py +++ b/swarms/models/base_ttv.py @@ -1,9 +1,11 @@ -from abc import abstractmethod -from swarms.models.base_llm import AbstractLLM -from diffusers.utils import export_to_video -from typing import Optional, List import asyncio +from abc import abstractmethod from concurrent.futures import ThreadPoolExecutor +from typing import List, Optional + +from diffusers.utils import export_to_video + +from swarms.models.base_llm import AbstractLLM class BaseTextToVideo(AbstractLLM): diff --git a/swarms/models/biogpt.py b/swarms/models/biogpt.py index 9ee5b513..a5ec7b7b 100644 --- a/swarms/models/biogpt.py +++ b/swarms/models/biogpt.py @@ -1,4 +1,4 @@ -""" +r""" BioGPT Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. @@ -35,10 +35,10 @@ advantage of BioGPT on biomedical literature to generate fluent descriptions for import torch from transformers import ( + BioGptForCausalLM, + BioGptTokenizer, pipeline, set_seed, - BioGptTokenizer, - BioGptForCausalLM, ) diff --git a/swarms/models/chest_agent.py b/swarms/models/chest_agent.py index 2867596c..7bf4e850 100644 --- a/swarms/models/chest_agent.py +++ b/swarms/models/chest_agent.py @@ -1,4 +1,5 @@ import io + import requests import torch from PIL import Image @@ -7,6 +8,7 @@ from transformers import ( AutoProcessor, GenerationConfig, ) + from swarms.models.base_multimodal_model import ( BaseMultiModalModel, ) # noqa: F401 diff --git a/swarms/models/cog_agent.py b/swarms/models/cog_agent.py index 2d0d09e9..35217c48 100644 --- a/swarms/models/cog_agent.py +++ b/swarms/models/cog_agent.py @@ -1,6 +1,7 @@ import torch -from PIL import Image from modelscope import AutoModelForCausalLM, AutoTokenizer +from PIL import Image + from swarms.models.base_multimodal_model import BaseMultiModalModel device_check = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/swarms/models/cohere_chat.py b/swarms/models/cohere_chat.py index 1a31d82e..98cc30bb 100644 --- a/swarms/models/cohere_chat.py +++ b/swarms/models/cohere_chat.py @@ -1,14 +1,6 @@ import logging from typing import Any, Callable, Dict, List, Optional -from tenacity import ( - before_sleep_log, - retry, - retry_if_exception_type, - stop_after_attempt, - wait_exponential, -) - from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, @@ -16,8 +8,15 @@ from langchain.callbacks.manager import ( from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.load.serializable import Serializable -from pydantic import Extra, Field, root_validator from langchain.utils import get_from_dict_or_env +from pydantic import Extra, Field, root_validator +from tenacity import ( + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_attempt, + wait_exponential, +) logger = logging.getLogger(__name__) @@ -35,7 +34,7 @@ def _create_retry_decorator(llm) -> Callable[[Any], Any]: wait=wait_exponential( multiplier=1, min=min_seconds, max=max_seconds ), - retry=(retry_if_exception_type(cohere.error.CohereError)), + 
retry=retry_if_exception_type(cohere.error.CohereError), before_sleep=before_sleep_log(logger, logging.WARNING), ) diff --git a/swarms/models/embeddings_base.py b/swarms/models/embeddings_base.py index b0f5e22e..e91c415f 100644 --- a/swarms/models/embeddings_base.py +++ b/swarms/models/embeddings_base.py @@ -1,4 +1,5 @@ """Interface for embedding models.""" + from abc import ABC, abstractmethod from typing import List diff --git a/swarms/models/gemini.py b/swarms/models/gemini.py index d12ea7d9..249e9c53 100644 --- a/swarms/models/gemini.py +++ b/swarms/models/gemini.py @@ -5,7 +5,6 @@ from pathlib import Path from dotenv import load_dotenv from PIL import Image - from swarms.models.base_multimodal_model import BaseMultiModalModel try: diff --git a/swarms/models/gpt4_sam.py b/swarms/models/gpt4_sam.py index aef0181f..37dde6a0 100644 --- a/swarms/models/gpt4_sam.py +++ b/swarms/models/gpt4_sam.py @@ -1,10 +1,11 @@ +from typing import Any + import cv2 from swarms.models.base_multimodal_model import BaseMultiModalModel from swarms.models.sam_supervision import SegmentAnythingMarkGenerator from swarms.utils.supervision_masking import refine_marks from swarms.utils.supervision_visualizer import MarkVisualizer -from typing import Any class GPT4VSAM(BaseMultiModalModel): diff --git a/swarms/models/gpt4_vision_api.py b/swarms/models/gpt4_vision_api.py index 57553bb9..5966a0b6 100644 --- a/swarms/models/gpt4_vision_api.py +++ b/swarms/models/gpt4_vision_api.py @@ -8,6 +8,7 @@ import aiohttp import requests from dotenv import load_dotenv from termcolor import colored + from swarms.models.base_multimodal_model import BaseMultiModalModel try: @@ -298,8 +299,6 @@ class GPT4VisionAPI(BaseMultiModalModel): if self.streaming_enabled: content = self.stream_response(content) - else: - pass if self.beautify: content = colored(content, "cyan") @@ -362,8 +361,6 @@ class GPT4VisionAPI(BaseMultiModalModel): if self.streaming_enabled: content = self.stream_response(content) - else: - pass if self.beautify: content = colored(content, "cyan") diff --git a/swarms/models/huggingface_pipeline.py b/swarms/models/huggingface_pipeline.py index e61d1080..e8d1afb9 100644 --- a/swarms/models/huggingface_pipeline.py +++ b/swarms/models/huggingface_pipeline.py @@ -1,6 +1,7 @@ from abc import abstractmethod -from termcolor import colored + import torch +from termcolor import colored from swarms.models.base_llm import AbstractLLM diff --git a/swarms/models/idefics.py b/swarms/models/idefics.py index b014fbce..cc654221 100644 --- a/swarms/models/idefics.py +++ b/swarms/models/idefics.py @@ -1,8 +1,10 @@ +from typing import Callable, Optional + import torch -from transformers import AutoProcessor, IdeficsForVisionText2Text from termcolor import colored +from transformers import AutoProcessor, IdeficsForVisionText2Text + from swarms.models.base_multimodal_model import BaseMultiModalModel -from typing import Optional, Callable def autodetect_device(): diff --git a/swarms/models/jina_embeds.py b/swarms/models/jina_embeds.py index ea621993..06689752 100644 --- a/swarms/models/jina_embeds.py +++ b/swarms/models/jina_embeds.py @@ -1,5 +1,5 @@ -import os import logging +import os import torch from numpy.linalg import norm @@ -9,6 +9,7 @@ from transformers import ( AutoTokenizer, BitsAndBytesConfig, ) + from swarms.models.base_embedding_model import BaseEmbeddingModel diff --git a/swarms/models/kosmos_two.py b/swarms/models/kosmos_two.py index 6bc4d810..0399f943 100644 --- a/swarms/models/kosmos_two.py +++ b/swarms/models/kosmos_two.py 
@@ -41,7 +41,7 @@ class Kosmos(BaseMultiModalModel): *args, **kwargs, ): - super(Kosmos, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.max_new_tokens = max_new_tokens diff --git a/swarms/models/layoutlm_document_qa.py b/swarms/models/layoutlm_document_qa.py index ad5b0628..09aa9a1a 100644 --- a/swarms/models/layoutlm_document_qa.py +++ b/swarms/models/layoutlm_document_qa.py @@ -2,7 +2,9 @@ LayoutLMDocumentQA is a multimodal model good for visual question answering on real-world docs like invoices, PDFs, etc """ + from transformers import pipeline + from swarms.models.base_multimodal_model import BaseMultiModalModel @@ -29,7 +31,7 @@ class LayoutLMDocumentQA(BaseMultiModalModel): *args, **kwargs, ): - super(LayoutLMDocumentQA, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.model_name = model_name self.task_type = task_type self.pipeline = pipeline(task_type, model=model_name) diff --git a/swarms/models/llama_function_caller.py b/swarms/models/llama_function_caller.py index 78169208..92593377 100644 --- a/swarms/models/llama_function_caller.py +++ b/swarms/models/llama_function_caller.py @@ -3,14 +3,15 @@ # !pip install transformers # !pip install bitsandbytes +from typing import Callable, Dict, List + import torch from transformers import ( - AutoTokenizer, AutoModelForCausalLM, + AutoTokenizer, BitsAndBytesConfig, TextStreamer, ) -from typing import Callable, Dict, List class LlamaFunctionCaller: diff --git a/swarms/models/llava.py b/swarms/models/llava.py index bcc1b09f..5aa4681f 100644 --- a/swarms/models/llava.py +++ b/swarms/models/llava.py @@ -1,8 +1,10 @@ +from io import BytesIO +from typing import Tuple, Union + import requests from PIL import Image from transformers import AutoProcessor, LlavaForConditionalGeneration -from typing import Tuple, Union -from io import BytesIO + from swarms.models.base_multimodal_model import BaseMultiModalModel diff --git a/swarms/models/mistral.py b/swarms/models/mistral.py index 6cfb6f77..dc7ba462 100644 --- a/swarms/models/mistral.py +++ b/swarms/models/mistral.py @@ -1,8 +1,8 @@ import torch from transformers import AutoModelForCausalLM, AutoTokenizer -from swarms.structs.message import Message from swarms.models.base_llm import AbstractLLM +from swarms.structs.message import Message class Mistral(AbstractLLM): diff --git a/swarms/models/mixtral.py b/swarms/models/mixtral.py index 6f3a9c7d..21720845 100644 --- a/swarms/models/mixtral.py +++ b/swarms/models/mixtral.py @@ -1,5 +1,7 @@ from typing import Optional + from transformers import AutoModelForCausalLM, AutoTokenizer + from swarms.models.base_llm import AbstractLLM diff --git a/swarms/models/model_registry.py b/swarms/models/model_registry.py index 6da04282..ee5bab81 100644 --- a/swarms/models/model_registry.py +++ b/swarms/models/model_registry.py @@ -1,5 +1,5 @@ -import pkgutil import inspect +import pkgutil class ModelRegistry: diff --git a/swarms/models/mpt.py b/swarms/models/mpt.py index 56f1bbdb..543e3f41 100644 --- a/swarms/models/mpt.py +++ b/swarms/models/mpt.py @@ -1,6 +1,7 @@ +import logging + import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline -import logging class MPT7B: diff --git a/swarms/models/nougat.py b/swarms/models/nougat.py index 453c6cae..e3d35370 100644 --- a/swarms/models/nougat.py +++ b/swarms/models/nougat.py @@ -8,7 +8,9 @@ format - Extracting metadata from pdfs """ + import re + import torch from PIL import Image from transformers import NougatProcessor, VisionEncoderDecoderModel diff
--git a/swarms/models/odin.py b/swarms/models/odin.py index 6a842af4..68bfaffd 100644 --- a/swarms/models/odin.py +++ b/swarms/models/odin.py @@ -1,7 +1,9 @@ import os + import supervision as sv -from ultralytics_example import YOLO from tqdm import tqdm +from ultralytics_example import YOLO + from swarms.models.base_llm import AbstractLLM from swarms.utils.download_weights_from_url import ( download_weights_from_url, @@ -34,7 +36,7 @@ class Odin(AbstractLLM): confidence_threshold: float = 0.3, iou_threshold: float = 0.7, ): - super(Odin, self).__init__() + super().__init__() self.source_weights_path = source_weights_path self.confidence_threshold = confidence_threshold self.iou_threshold = iou_threshold diff --git a/swarms/models/open_dalle.py b/swarms/models/open_dalle.py index b43d6c2e..57e8846b 100644 --- a/swarms/models/open_dalle.py +++ b/swarms/models/open_dalle.py @@ -1,7 +1,8 @@ -from typing import Optional, Any +from typing import Any, Optional import torch from diffusers import AutoPipelineForText2Image + from swarms.models.base_multimodal_model import BaseMultiModalModel diff --git a/swarms/models/openai_embeddings.py b/swarms/models/openai_embeddings.py index 0cbbdbee..c8151bdb 100644 --- a/swarms/models/openai_embeddings.py +++ b/swarms/models/openai_embeddings.py @@ -2,18 +2,7 @@ from __future__ import annotations import logging import warnings -from typing import ( - Any, - Callable, - Dict, - List, - Literal, - Optional, - Sequence, - Set, - Tuple, - Union, -) +from typing import Any, Callable, Literal, Sequence import numpy as np from pydantic import BaseModel, Extra, Field, root_validator @@ -25,6 +14,7 @@ from tenacity import ( stop_after_attempt, wait_exponential, ) + from swarms.models.embeddings_base import Embeddings @@ -36,7 +26,7 @@ def get_from_dict_or_env( return values.get(key) or os.getenv(env_key) or default -def get_pydantic_field_names(cls: Any) -> Set[str]: +def get_pydantic_field_names(cls: Any) -> set[str]: return set(cls.__annotations__.keys()) @@ -156,6 +146,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): .. code-block:: python from langchain.embeddings import OpenAIEmbeddings + openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set @@ -168,6 +159,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): .. 
code-block:: python import os + os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/" - def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def build_extra(cls, values: dict[str, Any]) -> dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) @@ -265,7 +256,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): return values @root_validator() - def validate_environment(cls, values: Dict) -> Dict: + def validate_environment(cls, values: dict) -> dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" @@ -320,7 +311,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): return values @property - def _invocation_params(self) -> Dict: + def _invocation_params(self) -> dict: openai_args = { "model": self.model, "request_timeout": self.request_timeout, @@ -345,12 +336,12 @@ class OpenAIEmbeddings(BaseModel, Embeddings): def _get_len_safe_embeddings( self, - texts: List[str], + texts: list[str], *, engine: str, - chunk_size: Optional[int] = None, - ) -> List[List[float]]: - embeddings: List[List[float]] = [ + chunk_size: int | None = None, + ) -> list[list[float]]: + embeddings: list[list[float]] = [ [] for _ in range(len(texts)) ] try: @@ -390,7 +381,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): ) indices.append(i) - batched_embeddings: List[List[float]] = [] + batched_embeddings: list[list[float]] = [] _chunk_size = chunk_size or self.chunk_size if self.show_progress_bar: @@ -413,10 +404,10 @@ class OpenAIEmbeddings(BaseModel, Embeddings): r["embedding"] for r in response["data"] ) - results: List[List[List[float]]] = [ + results: list[list[list[float]]] = [ [] for _ in range(len(texts)) ] - num_tokens_in_batch: List[List[int]] = [ + num_tokens_in_batch: list[list[int]] = [ [] for _ in range(len(texts)) ] for i in range(len(indices)): @@ -445,12 +436,12 @@ class OpenAIEmbeddings(BaseModel, Embeddings): # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb async def _aget_len_safe_embeddings( self, - texts: List[str], + texts: list[str], *, engine: str, - chunk_size: Optional[int] = None, - ) -> List[List[float]]: - embeddings: List[List[float]] = [ + chunk_size: int | None = None, - ) -> list[list[float]]: + embeddings: list[list[float]] = [ [] for _ in range(len(texts)) ] try: @@ -490,7 +481,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): ) indices.append(i) - batched_embeddings: List[List[float]] = [] + batched_embeddings: list[list[float]] = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = await async_embed_with_retry( @@ -502,10 +493,10 @@ class OpenAIEmbeddings(BaseModel, Embeddings): r["embedding"] for r in response["data"] ) - results: List[List[List[float]]] = [ + results: list[list[list[float]]] = [ [] for _ in range(len(texts)) ] - num_tokens_in_batch: List[List[int]] = [ + num_tokens_in_batch: list[list[int]] = [ [] for _ in range(len(texts)) ] for i in range(len(indices)): @@ -533,8 +524,8 @@ class OpenAIEmbeddings(BaseModel, Embeddings): return embeddings def embed_documents( - self, texts: List[str], chunk_size: Optional[int] = 0 - ) -> List[List[float]]: + self, texts: list[str], chunk_size: int | None = 0 + ) -> list[list[float]]: """Call out to OpenAI's embedding endpoint for embedding search docs.
Args: @@ -552,8 +543,8 @@ class OpenAIEmbeddings(BaseModel, Embeddings): ) async def aembed_documents( - self, texts: List[str], chunk_size: Optional[int] = 0 - ) -> List[List[float]]: + self, texts: list[str], chunk_size: int | None = 0 + ) -> list[list[float]]: """Call out to OpenAI's embedding endpoint async for embedding search docs. Args: @@ -570,7 +561,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): texts, engine=self.deployment ) - def embed_query(self, text: str) -> List[float]: + def embed_query(self, text: str) -> list[float]: """Call out to OpenAI's embedding endpoint for embedding query text. Args: @@ -581,7 +572,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): """ return self.embed_documents([text])[0] - async def aembed_query(self, text: str) -> List[float]: + async def aembed_query(self, text: str) -> list[float]: """Call out to OpenAI's embedding endpoint async for embedding query text. Args: diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py index b1aa0117..2ee56fdb 100644 --- a/swarms/models/openai_models.py +++ b/swarms/models/openai_models.py @@ -10,16 +10,9 @@ from typing import ( AsyncIterator, Callable, Collection, - Dict, Iterator, - List, Literal, Mapping, - Optional, - Set, - Tuple, - Type, - Union, ) from langchain.callbacks.manager import ( @@ -61,11 +54,12 @@ def _log_error_once(msg: str) -> None: def create_base_retry_decorator( - error_types: List[Type[BaseException]], + error_types: list[type[BaseException]], max_retries: int = 1, - run_manager: Optional[ - Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] - ] = None, + run_manager: ( + AsyncCallbackManagerForLLMRun | CallbackManagerForLLMRun + ) + | None = None, ) -> Callable[[Any], Any]: """Create a retry decorator for a given LLM and provided list of error types.""" @@ -92,7 +86,7 @@ def create_base_retry_decorator( max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards - retry_instance: "retry_base" = retry_if_exception_type( + retry_instance: retry_base = retry_if_exception_type( error_types[0] ) for error in error_types[1:]: @@ -116,9 +110,9 @@ def is_openai_v1() -> bool: def update_token_usage( - keys: Set[str], - response: Dict[str, Any], - token_usage: Dict[str, Any], + keys: set[str], + response: dict[str, Any], + token_usage: dict[str, Any], ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) @@ -130,7 +124,7 @@ def update_token_usage( def _stream_response_to_generation_chunk( - stream_response: Dict[str, Any], + stream_response: dict[str, Any], ) -> GenerationChunk: """Convert a stream response to a generation chunk.""" return GenerationChunk( @@ -147,7 +141,7 @@ def _stream_response_to_generation_chunk( def _update_response( - response: Dict[str, Any], stream_response: Dict[str, Any] + response: dict[str, Any], stream_response: dict[str, Any] ) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0][ @@ -161,7 +155,7 @@ def _update_response( ]["logprobs"] -def _streaming_response_template() -> Dict[str, Any]: +def _streaming_response_template() -> dict[str, Any]: return { "choices": [ { @@ -174,10 +168,11 @@ def _streaming_response_template() -> Dict[str, Any]: def _create_retry_decorator( - llm: Union[BaseOpenAI, OpenAIChat], - run_manager: Optional[ - Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] - ] = None, + llm: BaseOpenAI | OpenAIChat, + 
run_manager: ( + AsyncCallbackManagerForLLMRun | CallbackManagerForLLMRun + ) + | None = None, ) -> Callable[[Any], Any]: import openai @@ -196,8 +191,8 @@ def _create_retry_decorator( def completion_with_retry( - llm: Union[BaseOpenAI, OpenAIChat], - run_manager: Optional[CallbackManagerForLLMRun] = None, + llm: BaseOpenAI | OpenAIChat, + run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" @@ -213,8 +208,8 @@ def completion_with_retry( async def acompletion_with_retry( - llm: Union[BaseOpenAI, OpenAIChat], - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + llm: BaseOpenAI | OpenAIChat, + run_manager: AsyncCallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" @@ -234,12 +229,12 @@ class BaseOpenAI(BaseLLM): """Base OpenAI large language model class.""" @property - def lc_secrets(self) -> Dict[str, str]: + def lc_secrets(self) -> dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property - def lc_attributes(self) -> Dict[str, Any]: - attributes: Dict[str, Any] = {} + def lc_attributes(self) -> dict[str, Any]: + attributes: dict[str, Any] = {} if self.openai_api_base != "": attributes["openai_api_base"] = self.openai_api_base @@ -278,32 +273,28 @@ class BaseOpenAI(BaseLLM): """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" - model_kwargs: Dict[str, Any] = Field(default_factory=dict) + model_kwargs: dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" - openai_api_key: Optional[str] = None - openai_api_base: Optional[str] = None - openai_organization: Optional[str] = None + openai_api_key: str | None = None + openai_api_base: str | None = None + openai_organization: str | None = None # to support explicit proxy for OpenAI - openai_proxy: Optional[str] = None + openai_proxy: str | None = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" - request_timeout: Optional[Union[float, Tuple[float, float]]] = ( - None - ) + request_timeout: float | tuple[float, float] | None = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" - logit_bias: Optional[Dict[str, float]] = Field( - default_factory=dict - ) + logit_bias: dict[str, float] | None = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" - allowed_special: Union[Literal["all"], AbstractSet[str]] = set() + allowed_special: Literal["all"] | AbstractSet[str] = set() """Set of special tokens that are allowed。""" - disallowed_special: Union[Literal["all"], Collection[str]] = "all" + disallowed_special: Literal["all"] | Collection[str] = "all" """Set of special tokens that are not allowed。""" - tiktoken_model_name: Optional[str] = None + tiktoken_model_name: str | None = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will @@ -314,7 +305,7 @@ class BaseOpenAI(BaseLLM): API but with different models. 
In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" - def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore + def __new__(cls, **data: Any) -> OpenAIChat | BaseOpenAI: # type: ignore """Initialize the OpenAI object.""" data.get("model_name", "") return super().__new__(cls) @@ -325,7 +316,7 @@ class BaseOpenAI(BaseLLM): allow_population_by_field_name = True @root_validator(pre=True) - def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def build_extra(cls, values: dict[str, Any]) -> dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get("model_kwargs", {}) @@ -335,7 +326,7 @@ class BaseOpenAI(BaseLLM): return values @root_validator() - def validate_environment(cls, values: Dict) -> Dict: + def validate_environment(cls, values: dict) -> dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" @@ -376,7 +367,7 @@ class BaseOpenAI(BaseLLM): return values @property - def _default_params(self) -> Dict[str, Any]: + def _default_params(self) -> dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature, @@ -399,8 +390,8 @@ class BaseOpenAI(BaseLLM): def _stream( self, prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, + stop: list[str] | None = None, + run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: params = {**self._invocation_params, **kwargs, "stream": True} @@ -427,8 +418,8 @@ class BaseOpenAI(BaseLLM): async def _astream( self, prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + stop: list[str] | None = None, + run_manager: AsyncCallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: params = {**self._invocation_params, **kwargs, "stream": True} @@ -454,9 +445,9 @@ class BaseOpenAI(BaseLLM): def _generate( self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, + prompts: list[str], + stop: list[str] | None = None, + run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. @@ -478,7 +469,7 @@ class BaseOpenAI(BaseLLM): params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] - token_usage: Dict[str, int] = {} + token_usage: dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} @@ -489,7 +480,7 @@ class BaseOpenAI(BaseLLM): "Cannot stream results with multiple prompts." 
) - generation: Optional[GenerationChunk] = None + generation: GenerationChunk | None = None for chunk in self._stream( _prompts[0], stop, run_manager, **kwargs ): @@ -528,9 +519,9 @@ class BaseOpenAI(BaseLLM): async def _agenerate( self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + prompts: list[str], + stop: list[str] | None = None, + run_manager: AsyncCallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" @@ -538,7 +529,7 @@ class BaseOpenAI(BaseLLM): params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] - token_usage: Dict[str, int] = {} + token_usage: dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} @@ -549,7 +540,7 @@ class BaseOpenAI(BaseLLM): "Cannot stream results with multiple prompts." ) - generation: Optional[GenerationChunk] = None + generation: GenerationChunk | None = None async for chunk in self._astream( _prompts[0], stop, run_manager, **kwargs ): @@ -588,10 +579,10 @@ class BaseOpenAI(BaseLLM): def get_sub_prompts( self, - params: Dict[str, Any], - prompts: List[str], - stop: Optional[List[str]] = None, - ) -> List[List[str]]: + params: dict[str, Any], + prompts: list[str], + stop: list[str] | None = None, + ) -> list[list[str]]: """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: @@ -618,8 +609,8 @@ class BaseOpenAI(BaseLLM): def create_llm_result( self, choices: Any, - prompts: List[str], - token_usage: Dict[str, int], + prompts: list[str], + token_usage: dict[str, int], ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] @@ -646,9 +637,9 @@ class BaseOpenAI(BaseLLM): ) @property - def _invocation_params(self) -> Dict[str, Any]: + def _invocation_params(self) -> dict[str, Any]: """Get the parameters used to invoke the model.""" - openai_creds: Dict[str, Any] = { + openai_creds: dict[str, Any] = { "api_key": self.openai_api_key, "api_base": self.openai_api_base, "organization": self.openai_organization, @@ -672,7 +663,7 @@ class BaseOpenAI(BaseLLM): """Return type of llm.""" return "openai" - def get_token_ids(self, text: str) -> List[int]: + def get_token_ids(self, text: str) -> list[int]: """Get the token IDs using the tiktoken package.""" # tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: @@ -797,11 +788,12 @@ class OpenAI(BaseOpenAI): .. code-block:: python from langchain.llms import OpenAI + openai = OpenAI(model_name="text-davinci-003") """ @property - def _invocation_params(self) -> Dict[str, Any]: + def _invocation_params(self) -> dict[str, Any]: return { **{"model": self.model_name}, **super()._invocation_params, @@ -821,6 +813,7 @@ class AzureOpenAI(BaseOpenAI): .. 
code-block:: python from langchain.llms import AzureOpenAI + openai = AzureOpenAI(model_name="text-davinci-003") """ @@ -830,7 +823,7 @@ class AzureOpenAI(BaseOpenAI): openai_api_version: str = "" @root_validator() - def validate_azure_settings(cls, values: Dict) -> Dict: + def validate_azure_settings(cls, values: dict) -> dict: values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", @@ -849,7 +842,7 @@ class AzureOpenAI(BaseOpenAI): } @property - def _invocation_params(self) -> Dict[str, Any]: + def _invocation_params(self) -> dict[str, Any]: openai_params = { "engine": self.deployment_name, "api_type": self.openai_api_type, @@ -863,7 +856,7 @@ class AzureOpenAI(BaseOpenAI): return "azure" @property - def lc_attributes(self) -> Dict[str, Any]: + def lc_attributes(self) -> dict[str, Any]: return { "openai_api_type": self.openai_api_type, "openai_api_version": self.openai_api_version, @@ -898,28 +891,29 @@ class OpenAIChat(BaseLLM): .. code-block:: python from langchain.llms import OpenAIChat + openaichat = OpenAIChat(model_name="gpt-3.5-turbo") """ client: Any #: :meta private: model_name: str = "gpt-4-1106-preview" - model_kwargs: Dict[str, Any] = Field(default_factory=dict) - openai_api_key: Optional[str] = None - openai_api_base: Optional[str] = None - openai_proxy: Optional[str] = None + model_kwargs: dict[str, Any] = Field(default_factory=dict) + openai_api_key: str | None = None + openai_api_base: str | None = None + openai_proxy: str | None = None max_retries: int = 6 """Maximum number of retries to make when generating.""" - prefix_messages: List = Field(default_factory=list) + prefix_messages: list = Field(default_factory=list) """Series of messages for Chat input.""" streaming: bool = False """Whether to stream the results or not.""" - allowed_special: Union[Literal["all"], AbstractSet[str]] = set() + allowed_special: Literal["all"] | AbstractSet[str] = set() """Set of special tokens that are allowed。""" - disallowed_special: Union[Literal["all"], Collection[str]] = "all" + disallowed_special: Literal["all"] | Collection[str] = "all" """Set of special tokens that are not allowed。""" @root_validator(pre=True) - def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + def build_extra(cls, values: dict[str, Any]) -> dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = { field.alias for field in cls.__fields__.values() @@ -937,7 +931,7 @@ class OpenAIChat(BaseLLM): return values @root_validator() - def validate_environment(cls, values: Dict) -> Dict: + def validate_environment(cls, values: dict) -> dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" @@ -987,13 +981,13 @@ class OpenAIChat(BaseLLM): return values @property - def _default_params(self) -> Dict[str, Any]: + def _default_params(self) -> dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return self.model_kwargs def _get_chat_params( - self, prompts: List[str], stop: Optional[List[str]] = None - ) -> Tuple: + self, prompts: list[str], stop: list[str] | None = None + ) -> tuple: if len(prompts) > 1: raise ValueError( "OpenAIChat currently only supports single prompt," @@ -1002,7 +996,7 @@ class OpenAIChat(BaseLLM): messages = self.prefix_messages + [ {"role": "user", "content": prompts[0]} ] - params: Dict[str, Any] = { + params: dict[str, Any] = { **{"model": self.model_name}, **self._default_params, } @@ 
-1021,8 +1015,8 @@ class OpenAIChat(BaseLLM): def _stream( self, prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, + stop: list[str] | None = None, + run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> Iterator[GenerationChunk]: messages, params = self._get_chat_params([prompt], stop) @@ -1041,8 +1035,8 @@ class OpenAIChat(BaseLLM): async def _astream( self, prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + stop: list[str] | None = None, + run_manager: AsyncCallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> AsyncIterator[GenerationChunk]: messages, params = self._get_chat_params([prompt], stop) @@ -1060,13 +1054,13 @@ class OpenAIChat(BaseLLM): def _generate( self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, + prompts: list[str], + stop: list[str] | None = None, + run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> LLMResult: if self.streaming: - generation: Optional[GenerationChunk] = None + generation: GenerationChunk | None = None for chunk in self._stream( prompts[0], stop, run_manager, **kwargs ): @@ -1101,13 +1095,13 @@ class OpenAIChat(BaseLLM): async def _agenerate( self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + prompts: list[str], + stop: list[str] | None = None, + run_manager: AsyncCallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> LLMResult: if self.streaming: - generation: Optional[GenerationChunk] = None + generation: GenerationChunk | None = None async for chunk in self._astream( prompts[0], stop, run_manager, **kwargs ): @@ -1153,7 +1147,7 @@ class OpenAIChat(BaseLLM): """Return type of llm.""" return "openai-chat" - def get_token_ids(self, text: str) -> List[int]: + def get_token_ids(self, text: str) -> list[int]: """Get the token IDs using the tiktoken package.""" # tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: diff --git a/swarms/models/palm.py b/swarms/models/palm.py index d61d4856..ee0cbea2 100644 --- a/swarms/models/palm.py +++ b/swarms/models/palm.py @@ -1,7 +1,7 @@ from __future__ import annotations import logging -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms import BaseLLM @@ -85,19 +85,19 @@ class GooglePalm(BaseLLM, BaseModel): """Google PaLM models.""" client: Any #: :meta private: - google_api_key: Optional[str] + google_api_key: str | None model_name: str = "models/text-bison-001" """Model name to use.""" temperature: float = 0.7 """Run inference with this temperature. Must by in the closed interval [0.0, 1.0].""" - top_p: Optional[float] = None + top_p: float | None = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" - top_k: Optional[int] = None + top_k: int | None = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" - max_output_tokens: Optional[int] = None + max_output_tokens: int | None = None """Maximum number of tokens to include in a candidate. Must be greater than zero. 
If unset, will default to 64.""" n: int = 1 @@ -105,7 +105,7 @@ class GooglePalm(BaseLLM, BaseModel): not return the full n completions if duplicates are generated.""" @root_validator() - def validate_environment(cls, values: Dict) -> Dict: + def validate_environment(cls, values: dict) -> dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" @@ -152,9 +152,9 @@ class GooglePalm(BaseLLM, BaseModel): def _generate( self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, + prompts: list[str], + stop: list[str] | None = None, + run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> LLMResult: generations = [] diff --git a/swarms/models/petals.py b/swarms/models/petals.py index 7abc4590..7ceeef8b 100644 --- a/swarms/models/petals.py +++ b/swarms/models/petals.py @@ -1,4 +1,4 @@ -from transformers import AutoTokenizer, AutoModelForCausalLM +from transformers import AutoModelForCausalLM, AutoTokenizer class Petals: diff --git a/swarms/models/sam.py b/swarms/models/sam.py index 110d80b7..c51a2517 100644 --- a/swarms/models/sam.py +++ b/swarms/models/sam.py @@ -1,8 +1,9 @@ +from typing import List + +import requests import torch from PIL import Image -import requests from transformers import SamModel, SamProcessor -from typing import List device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/swarms/models/sam_supervision.py b/swarms/models/sam_supervision.py index 549844c2..5649f187 100644 --- a/swarms/models/sam_supervision.py +++ b/swarms/models/sam_supervision.py @@ -1,17 +1,18 @@ +from typing import Optional + import cv2 import numpy as np import supervision as sv from PIL import Image from transformers import ( - pipeline, + SamImageProcessor, SamModel, SamProcessor, - SamImageProcessor, + pipeline, ) -from typing import Optional -from swarms.utils.supervision_masking import masks_to_marks from swarms.models.base_multimodal_model import BaseMultiModalModel +from swarms.utils.supervision_masking import masks_to_marks class SegmentAnythingMarkGenerator(BaseMultiModalModel): diff --git a/swarms/models/sampling_params.py b/swarms/models/sampling_params.py index c2fdd121..d231c295 100644 --- a/swarms/models/sampling_params.py +++ b/swarms/models/sampling_params.py @@ -1,4 +1,5 @@ """Sampling parameters for text generation.""" + from enum import IntEnum from functools import cached_property from typing import Callable, List, Optional, Union @@ -104,7 +105,7 @@ class SamplingParams: use_beam_search: bool = False, length_penalty: float = 1.0, early_stopping: Union[bool, str] = False, - stop: Optional[Union[str, List[str]]] = None, + stop: Union[str, List[str], None] = None, stop_token_ids: Optional[List[int]] = None, include_stop_str_in_output: bool = False, ignore_eos: bool = False, diff --git a/swarms/models/speecht5.py b/swarms/models/speecht5.py index cc6ef931..b9f2653b 100644 --- a/swarms/models/speecht5.py +++ b/swarms/models/speecht5.py @@ -26,15 +26,16 @@ Blog Post: [https://huggingface.co/blog/speecht5] Demo: [https://huggingface.co/spaces/Matthijs/speecht5-tts-demo] """ -import torch + import soundfile as sf +import torch +from datasets import load_dataset from transformers import ( - pipeline, - SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, + SpeechT5Processor, + pipeline, ) -from datasets import load_dataset class SpeechT5: diff --git a/swarms/models/ssd_1b.py b/swarms/models/ssd_1b.py index 
d3b9086b..4479c866 100644 --- a/swarms/models/ssd_1b.py +++ b/swarms/models/ssd_1b.py @@ -7,11 +7,11 @@ from typing import List import backoff import torch +from cachetools import TTLCache from diffusers import StableDiffusionXLPipeline from PIL import Image from pydantic import validator from termcolor import colored -from cachetools import TTLCache @dataclass diff --git a/swarms/models/stable_diffusion.py b/swarms/models/stable_diffusion.py index a0068531..9ae45604 100644 --- a/swarms/models/stable_diffusion.py +++ b/swarms/models/stable_diffusion.py @@ -1,11 +1,12 @@ import base64 import os -import requests -import uuid import shutil -from dotenv import load_dotenv +import uuid from typing import List +import requests +from dotenv import load_dotenv + load_dotenv() stable_api_key = os.environ.get("STABLE_API_KEY") diff --git a/swarms/models/timm.py b/swarms/models/timm.py index de0484f2..f08afda3 100644 --- a/swarms/models/timm.py +++ b/swarms/models/timm.py @@ -3,6 +3,7 @@ from typing import List import timm import torch from torch import Tensor + from swarms.models.base_multimodal_model import BaseMultiModalModel diff --git a/swarms/models/types.py b/swarms/models/types.py index 460d0ef7..10957329 100644 --- a/swarms/models/types.py +++ b/swarms/models/types.py @@ -1,6 +1,7 @@ -from pydantic import BaseModel from typing import List, Optional +from pydantic import BaseModel + class TextModality(BaseModel): content: str diff --git a/swarms/models/ultralytics_model.py b/swarms/models/ultralytics_model.py index edb9984c..3cb9c956 100644 --- a/swarms/models/ultralytics_model.py +++ b/swarms/models/ultralytics_model.py @@ -1,7 +1,9 @@ -from swarms.models.base_multimodal_model import BaseMultiModalModel -from ultralytics import YOLO from typing import List +from ultralytics import YOLO + +from swarms.models.base_multimodal_model import BaseMultiModalModel + class UltralyticsModel(BaseMultiModalModel): """ diff --git a/swarms/models/vllm.py b/swarms/models/vllm.py index 0caeb3c8..cf9cda45 100644 --- a/swarms/models/vllm.py +++ b/swarms/models/vllm.py @@ -1,4 +1,5 @@ import torch + from swarms.models.base_llm import AbstractLLM if torch.cuda.is_available() or torch.cuda.device_count() > 0: diff --git a/swarms/models/zephyr.py b/swarms/models/zephyr.py index c5772295..205ec2e5 100644 --- a/swarms/models/zephyr.py +++ b/swarms/models/zephyr.py @@ -1,4 +1,5 @@ """Zephyr by HF""" + import torch from transformers import pipeline diff --git a/swarms/prompts/__init__.py b/swarms/prompts/__init__.py index 93416a9b..edec5906 100644 --- a/swarms/prompts/__init__.py +++ b/swarms/prompts/__init__.py @@ -1,4 +1,5 @@ from swarms.prompts.code_interpreter import CODE_INTERPRETER +from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP from swarms.prompts.finance_agent_prompt import FINANCE_AGENT_PROMPT from swarms.prompts.growth_agent_prompt import GROWTH_AGENT_PROMPT from swarms.prompts.legal_agent_prompt import LEGAL_AGENT_PROMPT @@ -6,7 +7,6 @@ from swarms.prompts.operations_agent_prompt import ( OPERATIONS_AGENT_PROMPT, ) from swarms.prompts.product_agent_prompt import PRODUCT_AGENT_PROMPT -from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP from swarms.prompts.schema_generator import SchemaGenerator __all__ = [ diff --git a/swarms/prompts/base.py b/swarms/prompts/base.py index a0e28c71..d104f468 100644 --- a/swarms/prompts/base.py +++ b/swarms/prompts/base.py @@ -1,7 +1,7 @@ from __future__ import annotations from abc import abstractmethod -from typing import TYPE_CHECKING, Any, 
Dict, List, Sequence +from typing import TYPE_CHECKING, Any, Sequence from pydantic import Field @@ -94,8 +94,8 @@ class BaseMessage(Serializable): class BaseMessageChunk(BaseMessage): def _merge_kwargs_dict( - self, left: Dict[str, Any], right: Dict[str, Any] - ) -> Dict[str, Any]: + self, left: dict[str, Any], right: dict[str, Any] + ) -> dict[str, Any]: """Merge additional_kwargs from another BaseMessageChunk into this one.""" merged = left.copy() for k, v in right.items(): @@ -223,7 +223,7 @@ def _message_to_dict(message: BaseMessage) -> dict: return {"type": message.type, "data": message.dict()} -def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]: +def messages_to_dict(messages: Sequence[BaseMessage]) -> list[dict]: """Convert a sequence of Messages to a list of dictionaries. Args: @@ -251,7 +251,7 @@ def _message_from_dict(message: dict) -> BaseMessage: raise ValueError(f"Got unexpected message type: {_type}") -def messages_from_dict(messages: List[dict]) -> List[BaseMessage]: +def messages_from_dict(messages: list[dict]) -> list[BaseMessage]: """Convert a sequence of messages from dicts to Message objects. Args: diff --git a/swarms/prompts/chat_prompt.py b/swarms/prompts/chat_prompt.py index 013aee28..49a0aa23 100644 --- a/swarms/prompts/chat_prompt.py +++ b/swarms/prompts/chat_prompt.py @@ -1,7 +1,7 @@ from __future__ import annotations from abc import abstractmethod -from typing import Dict, List, Sequence +from typing import Sequence class Message: @@ -11,7 +11,7 @@ class Message: """ def __init__( - self, content: str, role: str, additional_kwargs: Dict = None + self, content: str, role: str, additional_kwargs: dict = None ): self.content = content self.role = role @@ -33,7 +33,7 @@ class HumanMessage(Message): self, content: str, role: str = "Human", - additional_kwargs: Dict = None, + additional_kwargs: dict = None, example: bool = False, ): super().__init__(content, role, additional_kwargs) @@ -52,7 +52,7 @@ class AIMessage(Message): self, content: str, role: str = "AI", - additional_kwargs: Dict = None, + additional_kwargs: dict = None, example: bool = False, ): super().__init__(content, role, additional_kwargs) @@ -72,7 +72,7 @@ class SystemMessage(Message): self, content: str, role: str = "System", - additional_kwargs: Dict = None, + additional_kwargs: dict = None, ): super().__init__(content, role, additional_kwargs) @@ -90,7 +90,7 @@ class FunctionMessage(Message): content: str, role: str = "Function", name: str = None, - additional_kwargs: Dict = None, + additional_kwargs: dict = None, ): super().__init__(content, role, additional_kwargs) self.name = name @@ -105,7 +105,7 @@ class ChatMessage(Message): """ def __init__( - self, content: str, role: str, additional_kwargs: Dict = None + self, content: str, role: str, additional_kwargs: dict = None ): super().__init__(content, role, additional_kwargs) @@ -135,7 +135,7 @@ def message_to_dict(message: Message) -> dict: return {"type": message.get_type(), "data": message.__dict__} -def messages_to_dict(messages: Sequence[Message]) -> List[dict]: +def messages_to_dict(messages: Sequence[Message]) -> list[dict]: return [message_to_dict(m) for m in messages] @@ -155,5 +155,5 @@ def message_from_dict(message: dict) -> Message: raise ValueError(f"Got unexpected message type: {_type}") -def messages_from_dict(messages: List[dict]) -> List[Message]: +def messages_from_dict(messages: list[dict]) -> list[Message]: return [message_from_dict(m) for m in messages] diff --git a/swarms/structs/SWARMS.md 
b/swarms/structs/SWARMS.md index 1a417831..070eb176 100644 --- a/swarms/structs/SWARMS.md +++ b/swarms/structs/SWARMS.md @@ -31,7 +31,7 @@ class Orchestrator(ABC): # Break down main_task into smaller tasks # ... return sub_tasks - + def aggregate_results(self, sub_results: List[Any]) -> Any: # Combine results from sub-tasks into a cohesive output # ... diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index 40bd3325..d604cf07 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -1,4 +1,5 @@ from swarms.structs.agent import Agent +from swarms.structs.agent_base import AgentJob from swarms.structs.autoscaler import AutoScaler from swarms.structs.base import BaseStructure from swarms.structs.base_swarm import AbstractSwarm @@ -8,9 +9,21 @@ from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.conversation import Conversation from swarms.structs.graph_workflow import GraphWorkflow from swarms.structs.groupchat import GroupChat, GroupChatManager +from swarms.structs.majority_voting import ( + MajorityVoting, + majority_voting, + most_frequent, + parse_code_completion, +) from swarms.structs.message import Message from swarms.structs.model_parallizer import ModelParallelizer from swarms.structs.multi_agent_collab import MultiAgentCollaboration +from swarms.structs.multi_process_workflow import ( + MultiProcessingWorkflow, +) +from swarms.structs.multi_threaded_workflow import ( + MultiThreadedWorkflow, +) from swarms.structs.nonlinear_workflow import NonlinearWorkflow from swarms.structs.plan import Plan from swarms.structs.recursive_workflow import RecursiveWorkflow @@ -24,6 +37,7 @@ from swarms.structs.schemas import ( TaskRequestBody, ) from swarms.structs.sequential_workflow import SequentialWorkflow +from swarms.structs.stackoverflow_swarm import StackOverflowSwarm from swarms.structs.step import Step from swarms.structs.swarm_net import SwarmNetwork from swarms.structs.swarming_architectures import ( @@ -47,6 +61,11 @@ from swarms.structs.swarming_architectures import ( star_swarm, ) from swarms.structs.task import Task +from swarms.structs.task_queue_base import ( + TaskQueueBase, + synchronized_queue, +) +from swarms.structs.tool_json_schema import JSON from swarms.structs.utils import ( detect_markdown, distribute_tasks, @@ -56,26 +75,6 @@ from swarms.structs.utils import ( find_token_in_text, parse_tasks, ) -from swarms.structs.tool_json_schema import JSON -from swarms.structs.majority_voting import ( - most_frequent, - parse_code_completion, - majority_voting, - MajorityVoting, -) -from swarms.structs.stackoverflow_swarm import StackOverflowSwarm -from swarms.structs.task_queue_base import ( - synchronized_queue, - TaskQueueBase, -) -from swarms.structs.multi_process_workflow import ( - MultiProcessingWorkflow, -) -from swarms.structs.multi_threaded_workflow import ( - MultiThreadedWorkflow, -) -from swarms.structs.agent_base import AgentJob - __all__ = [ "Agent", diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 16109dab..95c01c79 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -10,26 +10,26 @@ from typing import Any, Callable, Dict, List, Optional, Tuple from termcolor import colored from swarms.memory.base_vectordb import AbstractVectorDatabase -from swarms.prompts.agent_system_prompts import ( - AGENT_SYSTEM_PROMPT_3, -) +from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3 from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( 
MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1, ) +from swarms.prompts.worker_prompt import worker_tools_sop_promp from swarms.structs.conversation import Conversation +from swarms.structs.schemas import Step from swarms.tokenizers.base_tokenizer import BaseTokenizer +from swarms.tools.exec_tool import execute_tool_by_name from swarms.tools.tool import BaseTool from swarms.utils.code_interpreter import SubprocessCodeInterpreter from swarms.utils.data_to_text import data_to_text from swarms.utils.logger import logger -from swarms.utils.parse_code import ( - extract_code_from_markdown, -) +from swarms.utils.parse_code import extract_code_from_markdown from swarms.utils.pdf_to_text import pdf_to_text from swarms.utils.token_count_tiktoken import limit_tokens_from_string -from swarms.tools.exec_tool import execute_tool_by_name -from swarms.prompts.worker_prompt import worker_tools_sop_promp -from swarms.structs.schemas import Step +from swarms.utils.video_to_frames import ( + save_frames_as_images, + video_to_frames, +) # Utils @@ -513,6 +513,7 @@ class Agent: self, task: Optional[str] = None, img: Optional[str] = None, + video: Optional[str] = None, *args, **kwargs, ): @@ -531,6 +532,12 @@ class Agent: """ try: + if video: + video_to_frames(video) + frames = save_frames_as_images(video) + for frame in frames: + img = frame + # Activate Autonomous agent message self.activate_autonomous_agent() @@ -578,6 +585,9 @@ class Agent: ) print(response) + # Add the response to the history + history.append(response) + # Log each step step = Step( input=task, @@ -631,8 +641,6 @@ class Agent: ) attempt += 1 time.sleep(self.retry_interval) - # Add the response to the history - history.append(response) time.sleep(self.loop_interval) # Add the history to the memory @@ -807,7 +815,7 @@ class Agent: Args: file_path (str): The path to the file containing the saved agent history. 
""" - with open(file_path, "r") as f: + with open(file_path) as f: self.short_memory = json.load(f) print(f"Loaded agent history from {file_path}") @@ -1096,7 +1104,7 @@ class Agent: >>> agent.run("Continue with the task") """ - with open(file_path, "r") as f: + with open(file_path) as f: state = json.load(f) # Restore other saved attributes diff --git a/swarms/structs/async_workflow.py b/swarms/structs/async_workflow.py index b46061b2..da144642 100644 --- a/swarms/structs/async_workflow.py +++ b/swarms/structs/async_workflow.py @@ -1,6 +1,7 @@ import asyncio from dataclasses import dataclass, field from typing import Any, Callable, List, Optional + from swarms.structs.task import Task from swarms.utils.logger import logger diff --git a/swarms/structs/autoscaler.py b/swarms/structs/autoscaler.py index f26247d5..4996b7d5 100644 --- a/swarms/structs/autoscaler.py +++ b/swarms/structs/autoscaler.py @@ -7,12 +7,12 @@ from typing import Callable, Dict, List, Optional from termcolor import colored from swarms.structs.agent import Agent +from swarms.structs.base import BaseStructure from swarms.utils.decorators import ( error_decorator, log_decorator, timing_decorator, ) -from swarms.structs.base import BaseStructure class AutoScaler(BaseStructure): @@ -241,7 +241,7 @@ class AutoScaler(BaseStructure): task = self.task_queue.get() if task: available_agent = next( - (agent for agent in self.agents_pool) + agent for agent in self.agents_pool ) if available_agent: available_agent.run(task) diff --git a/swarms/structs/base.py b/swarms/structs/base.py index adfa974d..9d3b4c15 100644 --- a/swarms/structs/base.py +++ b/swarms/structs/base.py @@ -1,11 +1,12 @@ +import asyncio +import concurrent.futures import json import os from abc import ABC -from typing import Optional, Any, Dict, List -from datetime import datetime -import asyncio -import concurrent.futures from concurrent.futures import ThreadPoolExecutor +from datetime import datetime +from typing import Any, Dict, List, Optional + import psutil try: @@ -81,7 +82,6 @@ class BaseStructure(ABC): def run(self, *args, **kwargs): """Run the structure.""" - pass def save_to_file(self, data: Any, file_path: str): """Save data to file. 
@@ -102,7 +102,7 @@ class BaseStructure(ABC): Returns: Any: _description_ """ - with open(file_path, "r") as file: + with open(file_path) as file: return json.load(file) def save_metadata(self, metadata: Dict[str, Any]): diff --git a/swarms/structs/base_multiagent_structure.py b/swarms/structs/base_multiagent_structure.py index 302f2902..48388df5 100644 --- a/swarms/structs/base_multiagent_structure.py +++ b/swarms/structs/base_multiagent_structure.py @@ -132,7 +132,7 @@ class BaseMultiAgentStructure: None """ try: - with open(filename, "r") as f: + with open(filename) as f: self.__dict__ = json.load(f) except Exception as e: logger.error(e) @@ -164,7 +164,7 @@ class BaseMultiAgentStructure: None """ try: - with open(filename, "r") as f: + with open(filename) as f: self.__dict__ = yaml.load(f) except Exception as e: logger.error(e) diff --git a/swarms/structs/base_swarm.py b/swarms/structs/base_swarm.py index a961be58..ed910546 100644 --- a/swarms/structs/base_swarm.py +++ b/swarms/structs/base_swarm.py @@ -3,7 +3,6 @@ from abc import ABC, abstractmethod from concurrent.futures import ThreadPoolExecutor, as_completed from typing import Any, Callable, Dict, List, Optional - from swarms.structs.agent import Agent @@ -59,17 +58,14 @@ class AbstractSwarm(ABC): """Initialize the swarm with agents""" self.agents = agents self.max_loops = max_loops - pass # @abstractmethod def communicate(self): """Communicate with the swarm through the orchestrator, protocols, and the universal communication layer""" - pass # @abstractmethod def run(self): """Run the swarm""" - pass def __call__( self, @@ -89,34 +85,28 @@ class AbstractSwarm(ABC): def step(self): """Step the swarm""" - pass # @abstractmethod def add_agent(self, agent: "Agent"): """Add a agent to the swarm""" - pass # @abstractmethod def remove_agent(self, agent: "Agent"): """Remove a agent from the swarm""" - pass # @abstractmethod def broadcast( self, message: str, sender: Optional["Agent"] = None ): """Broadcast a message to all agents""" - pass # @abstractmethod def reset(self): """Reset the swarm""" - pass # @abstractmethod def plan(self, task: str): """agents must individually plan using a workflow or pipeline""" - pass # @abstractmethod def direct_message( @@ -126,27 +116,22 @@ class AbstractSwarm(ABC): recipient: "Agent", ): """Send a direct message to a agent""" - pass # @abstractmethod def autoscaler(self, num_agents: int, agent: ["Agent"]): """Autoscaler that acts like kubernetes for autonomous agents""" - pass # @abstractmethod def get_agent_by_id(self, id: str) -> "Agent": """Locate a agent by id""" - pass # @abstractmethod def get_agent_by_name(self, name: str) -> "Agent": """Locate a agent by name""" - pass # @abstractmethod def assign_task(self, agent: "Agent", task: Any) -> Dict: """Assign a task to a agent""" - pass # @abstractmethod def get_all_tasks(self, agent: "Agent", task: Any): @@ -155,67 +140,54 @@ class AbstractSwarm(ABC): # @abstractmethod def get_finished_tasks(self) -> List[Dict]: """Get all finished tasks""" - pass # @abstractmethod def get_pending_tasks(self) -> List[Dict]: """Get all pending tasks""" - pass # @abstractmethod def pause_agent(self, agent: "Agent", agent_id: str): """Pause a agent""" - pass # @abstractmethod def resume_agent(self, agent: "Agent", agent_id: str): """Resume a agent""" - pass # @abstractmethod def stop_agent(self, agent: "Agent", agent_id: str): """Stop a agent""" - pass # @abstractmethod def restart_agent(self, agent: "Agent"): """Restart agent""" - pass # @abstractmethod def 
scale_up(self, num_agent: int): """Scale up the number of agents""" - pass # @abstractmethod def scale_down(self, num_agent: int): """Scale down the number of agents""" - pass # @abstractmethod def scale_to(self, num_agent: int): """Scale to a specific number of agents""" - pass # @abstractmethod def get_all_agents(self) -> List["Agent"]: """Get all agents""" - pass # @abstractmethod def get_swarm_size(self) -> int: """Get the size of the swarm""" - pass # #@abstractmethod def get_swarm_status(self) -> Dict: """Get the status of the swarm""" - pass # #@abstractmethod def save_swarm_state(self): """Save the swarm state""" - pass def batched_run(self, tasks: List[Any], *args, **kwargs): """_summary_ diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index 03e503cc..ace1fd3d 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -62,7 +62,6 @@ class BaseWorkflow(BaseStructure): Abstract method for the sequential loop. """ # raise NotImplementedError("You must implement this method") - pass def __log(self, message: str): """ @@ -298,7 +297,7 @@ class BaseWorkflow(BaseStructure): try: filepath = filepath or self.restore_state_filepath - with open(filepath, "r") as f: + with open(filepath) as f: state = json.load(f) self.max_loops = state["max_loops"] self.tasks = [] diff --git a/swarms/structs/blocksdict.py b/swarms/structs/blocksdict.py index 93aab729..0f978eee 100644 --- a/swarms/structs/blocksdict.py +++ b/swarms/structs/blocksdict.py @@ -1,8 +1,4 @@ -from typing import ( - Any, - Dict, - Optional, -) +from typing import Any, Dict, Optional from swarms.structs.base import BaseStructure diff --git a/swarms/structs/blockslist.py b/swarms/structs/blockslist.py index b2a4db08..fca1e846 100644 --- a/swarms/structs/blockslist.py +++ b/swarms/structs/blockslist.py @@ -1,8 +1,4 @@ -from typing import ( - Any, - List, - Optional, -) +from typing import Any, List, Optional from swarms.structs.base import BaseStructure diff --git a/swarms/structs/company.py b/swarms/structs/company.py index 11b6d61f..06b7bdfe 100644 --- a/swarms/structs/company.py +++ b/swarms/structs/company.py @@ -2,8 +2,8 @@ from dataclasses import dataclass, field from typing import Dict, List, Optional, Union from swarms.structs.agent import Agent -from swarms.utils.logger import logger from swarms.structs.conversation import Conversation +from swarms.utils.logger import logger @dataclass diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py index 8aa5399b..f36df3b3 100644 --- a/swarms/structs/concurrent_workflow.py +++ b/swarms/structs/concurrent_workflow.py @@ -1,10 +1,9 @@ import concurrent.futures from dataclasses import dataclass, field -from typing import Dict, List, Optional, Callable +from typing import Callable, Dict, List, Optional from swarms.structs.base import BaseStructure from swarms.structs.task import Task - from swarms.utils.logger import logger diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py index 8132de87..4f426cbf 100644 --- a/swarms/structs/conversation.py +++ b/swarms/structs/conversation.py @@ -1,12 +1,12 @@ import datetime import json +from typing import Optional from termcolor import colored from swarms.memory.base_db import AbstractDatabase from swarms.structs.base import BaseStructure from swarms.tokenizers.base_tokenizer import BaseTokenizer -from typing import Optional class Conversation(BaseStructure): @@ -200,7 +200,7 @@ class Conversation(BaseStructure): Args: filename (str): 
filename to import from """ - with open(filename, "r") as f: + with open(filename) as f: for line in f: role, content = line.split(": ", 1) self.add(role, content.strip()) @@ -250,7 +250,7 @@ class Conversation(BaseStructure): """ # Load the conversation history from a JSON file if filename is not None: - with open(filename, "r") as f: + with open(filename) as f: self.conversation_history = json.load(f) def search_keyword_in_conversation(self, keyword: str): diff --git a/swarms/structs/debate.py b/swarms/structs/debate.py index 9db84f06..95c889d3 100644 --- a/swarms/structs/debate.py +++ b/swarms/structs/debate.py @@ -1,10 +1,10 @@ -import os import json - -from swarms.structs.agent import Agent +import os from datetime import datetime from typing import List +from swarms.structs.agent import Agent + NAME_LIST = [ "Affirmative side", "Negative side", @@ -23,9 +23,7 @@ class DebatePlayer(Agent): openai_api_key (str): As the parameter name suggests sleep_time (float): sleep because of rate limits """ - super(DebatePlayer, self).__init__( - llm=llm, agent_name=name, *args, **kwargs - ) + super().__init__(llm=llm, agent_name=name, *args, **kwargs) class Debate: diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index c4bcea7e..23d90339 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -1,8 +1,7 @@ -from swarms.structs.base import BaseStructure - - import logging +from swarms.structs.base import BaseStructure + class GraphWorkflow(BaseStructure): """ diff --git a/swarms/structs/groupchat.py b/swarms/structs/groupchat.py index 21fff944..57cb6472 100644 --- a/swarms/structs/groupchat.py +++ b/swarms/structs/groupchat.py @@ -1,6 +1,7 @@ import logging from dataclasses import dataclass from typing import Dict, List + from swarms.structs.agent import Agent logger = logging.getLogger(__name__) diff --git a/swarms/structs/load_balancer.py b/swarms/structs/load_balancer.py index f0038335..5e0178d7 100644 --- a/swarms/structs/load_balancer.py +++ b/swarms/structs/load_balancer.py @@ -1,5 +1,6 @@ -from typing import Optional, List import multiprocessing as mp +from typing import List, Optional + from swarms.structs.base import BaseStructure diff --git a/swarms/structs/long_swarm.py b/swarms/structs/long_swarm.py new file mode 100644 index 00000000..80d301cb --- /dev/null +++ b/swarms/structs/long_swarm.py @@ -0,0 +1,153 @@ +from typing import List + +from swarms.structs.agent import Agent +from swarms.utils.parse_code import extract_code_from_markdown + + +class LongContextSwarmLeader: + """ + Represents a leader in a long context swarm. + + Args: + - llm (str): The language model to use for the agent. + - agents (List[Agent]): The agents in the swarm. + - prompt_template_json (str): The SOP template in JSON format. + - return_parsed (bool): Whether to return the parsed output. + + """ + + def __init__( + self, + llm, + agents: List[Agent] = None, + prompt_template_json: str = None, + return_parsed: bool = False, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.llm = llm + self.agents = agents + self.prompt_template_json = prompt_template_json + self.return_parsed = return_parsed + + # Create an instance of the Agent class + self.agent = Agent( + llm=llm, + system_prompt=None, + sop=self.prompt_template_json, + *args, + **kwargs + ) + + + def prep_schema(self, task: str, *args, **kwargs): + """ + Returns a formatted string containing the metadata of all agents in the swarm. 
+ + Parameters: + - task (str): The description of the task. + + Returns: + - prompt (str): The formatted string containing the agent metadata. + """ + prompt = f""" + + You need to recruit a team of members to solve a + task. Select the appropriate member based on the + task description: + + # Task Description + {task} + + # Members + + Your output must follow this JSON schema below in markdown format: + {{ + "agent_id": "string", + "agent_name": "string", + "agent_description": "string" + }} + + """ + for agent in self.agents: + prompt += f"Member Name: {agent.ai_name}\nMember ID: {agent.id}\nMember Description: {agent.description}\n\n" + + return prompt + + + def prep_schema_second( + self, + task_description: str, + task: str + ): + prompt = f""" + You are the leader of a team of {len(self.agents)} + members. Your team will need to collaborate to + solve a task. The rule is: + + 1. Only you know the task description and task + objective; the other members do not. + 2. But they will receive different documents that + may contain answers, and you need to send them + an instruction to query their document. + 3. Your instruction need to include your + understanding of the task and what you need them + to focus on. If necessary, your instructions can + explicitly include the task objective. + 4. Finally, you need to complete the task based on + the query results they return. + + # Task Description: + {task_description} + + # Task Objective: + {task} + + # Generate Instruction for Members: + Now, you need to generate an instruction for all + team members. You can ask them to answer a + certain question, or to extract information related + to the task, based on their respective documents. + Your output must following the JSON + format: {{"type": "instruction", "content": + "your_instruction_content"}} + + """ + return prompt + + + def run(self, task: str, *args, **kwargs): + """ + Executes the specified task using the agent's run method. + + Args: + task: The task to be executed. + *args: Additional positional arguments for the task. + **kwargs: Additional keyword arguments for the task. + + Returns: + The result of the task execution. 
+ """ + task = self.prep_schema(task) + out = self.agent.run(task, *args, **kwargs) + + if self.return_parsed: + out = extract_code_from_markdown(out) + + return out + +# class LongContextSwarm(BaseSwarm): +# def __init__( +# self, +# agents: List[Agent], +# Leader: Agent, +# team_loops: int, +# *args, +# **kwargs, +# ): +# super().__init__() +# self.agents = agents +# self.leader = Leader +# self.team_loops = team_loops +# self.chunks = len(agents) diff --git a/swarms/structs/model_parallizer.py b/swarms/structs/model_parallizer.py index 828d4ef4..9d27f14c 100644 --- a/swarms/structs/model_parallizer.py +++ b/swarms/structs/model_parallizer.py @@ -96,7 +96,7 @@ class ModelParallelizer: @classmethod def load_llms_from_file(cls, filename): """Load llms from file""" - with open(filename, "r") as file: + with open(filename) as file: llms = [line.strip() for line in file.readlines()] return cls(llms) diff --git a/swarms/structs/multi_agent_collab.py b/swarms/structs/multi_agent_collab.py index 64b030d0..8359068d 100644 --- a/swarms/structs/multi_agent_collab.py +++ b/swarms/structs/multi_agent_collab.py @@ -299,7 +299,7 @@ class MultiAgentCollaboration: def load(self): """Loads the state of all agents.""" - with open(self.saved_file_path_name, "r") as file: + with open(self.saved_file_path_name) as file: state = json.load(file) self._step = state["step"] self.results = state["results"] diff --git a/swarms/structs/multi_threaded_workflow.py b/swarms/structs/multi_threaded_workflow.py index df17d8ce..475251ba 100644 --- a/swarms/structs/multi_threaded_workflow.py +++ b/swarms/structs/multi_threaded_workflow.py @@ -1,14 +1,15 @@ -import threading -from swarms.structs.base_workflow import BaseWorkflow import logging +import queue +import threading from concurrent.futures import ( FIRST_COMPLETED, ThreadPoolExecutor, wait, ) from typing import List + +from swarms.structs.base_workflow import BaseWorkflow from swarms.structs.task import Task -import queue logging.basicConfig( level=logging.INFO, diff --git a/swarms/structs/nonlinear_workflow.py b/swarms/structs/nonlinear_workflow.py index 0fc1d200..d86ef028 100644 --- a/swarms/structs/nonlinear_workflow.py +++ b/swarms/structs/nonlinear_workflow.py @@ -1,5 +1,5 @@ -from swarms.structs.task import Task from swarms.structs.base import BaseStructure +from swarms.structs.task import Task from swarms.utils.logger import logger # noqa: F401 diff --git a/swarms/structs/recursive_workflow.py b/swarms/structs/recursive_workflow.py index afeb91b7..60d33fe4 100644 --- a/swarms/structs/recursive_workflow.py +++ b/swarms/structs/recursive_workflow.py @@ -1,10 +1,9 @@ +import logging from typing import List from swarms.structs.base import BaseStructure from swarms.structs.task import Task -import logging - logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) diff --git a/swarms/structs/schemas.py b/swarms/structs/schemas.py index f7f5441e..f4cf6a3b 100644 --- a/swarms/structs/schemas.py +++ b/swarms/structs/schemas.py @@ -1,7 +1,7 @@ from __future__ import annotations from enum import Enum -from typing import Any, List, Optional +from typing import Any from pydantic import BaseModel, Field @@ -34,7 +34,7 @@ class Artifact(BaseModel): file_name: str = Field( ..., description="Filename of the artifact", example="main.py" ) - relative_path: Optional[str] = Field( + relative_path: str | None = Field( None, description=( "Relative path of the artifact in the agent's workspace" @@ -45,7 +45,7 @@ class Artifact(BaseModel): class 
ArtifactUpload(BaseModel):
file: bytes = Field(..., description="File to upload")
- relative_path: Optional[str] = Field(
+ relative_path: str | None = Field(
None,
description=(
"Relative path of the artifact in the agent's workspace"
@@ -77,14 +77,14 @@ class StepOutput(BaseModel):
class TaskRequestBody(BaseModel):
- input: Optional[str] = Field(
+ input: str | None = Field(
None,
description="Input prompt for the task.",
example=(
"Write the words you receive to the file 'output.txt'."
),
)
- additional_input: Optional[TaskInput] = None
+ additional_input: TaskInput | None = None
class Task(TaskRequestBody):
@@ -93,7 +93,7 @@ class Task(TaskRequestBody):
description="The ID of the task.",
example="50da533e-3904-4401-8a07-c49adf88b5eb",
)
- artifacts: List[Artifact] = Field(
+ artifacts: list[Artifact] = Field(
[],
description="A list of artifacts that the task has produced.",
example=[
@@ -104,12 +104,12 @@ class Task(TaskRequestBody):
class StepRequestBody(BaseModel):
- input: Optional[str] = Field(
+ input: str | None = Field(
None,
description="Input prompt for the step.",
example="Washington",
)
- additional_input: Optional[StepInput] = None
+ additional_input: StepInput | None = None
class Status(Enum):
@@ -129,7 +129,7 @@ class Step(StepRequestBody):
description="The ID of the task step.",
example="6bb1801a-fd80-45e8-899a-4dd723cc602e",
)
- name: Optional[str] = Field(
+ name: str | None = Field(
None,
description="The name of the task step.",
example="Write to file",
@@ -137,7 +137,7 @@ class Step(StepRequestBody):
status: Status = Field(
..., description="The status of the task step."
)
- output: Optional[str] = Field(
+ output: str | None = Field(
None,
description="Output of the task step.",
example=(
"
[...]
- ...((uuid.getnode() >> elements) & 0xFF)
+ f"{(uuid.getnode() >> elements) & 0xFF:02x}"
for elements in range(0, 2 * 6, 8)
][::-1]
),
diff --git a/swarms/tokenizers/__init__.py b/swarms/tokenizers/__init__.py
index 02129cb9..d62146ca 100644
--- a/swarms/tokenizers/__init__.py
+++ b/swarms/tokenizers/__init__.py
@@ -1,15 +1,15 @@
-from swarms.tokenizers.r_tokenizers import (
- SentencePieceTokenizer,
- HuggingFaceTokenizer,
- Tokenizer,
-)
-from swarms.tokenizers.base_tokenizer import BaseTokenizer
-from swarms.tokenizers.openai_tokenizers import OpenAITokenizer
from swarms.tokenizers.anthropic_tokenizer import (
- import_optional_dependency,
AnthropicTokenizer,
+ import_optional_dependency,
)
+from swarms.tokenizers.base_tokenizer import BaseTokenizer
from swarms.tokenizers.cohere_tokenizer import CohereTokenizer
+from swarms.tokenizers.openai_tokenizers import OpenAITokenizer
+from swarms.tokenizers.r_tokenizers import (
+ HuggingFaceTokenizer,
+ SentencePieceTokenizer,
+ Tokenizer,
+)
__all__ = [
"SentencePieceTokenizer",
diff --git a/swarms/tokenizers/anthropic_tokenizer.py b/swarms/tokenizers/anthropic_tokenizer.py
index 94bced96..77cd07c3 100644
--- a/swarms/tokenizers/anthropic_tokenizer.py
+++ b/swarms/tokenizers/anthropic_tokenizer.py
@@ -3,7 +3,6 @@ from __future__ import annotations
from dataclasses import dataclass
from importlib import import_module
from types import ModuleType
-from typing import Optional
from anthropic import Anthropic
@@ -16,7 +15,7 @@ INSTALL_MAPPING = {
}
-def import_optional_dependency(name: str) -> Optional[ModuleType]:
+def import_optional_dependency(name: str) -> ModuleType | None:
"""Import an optional dependency.
If a dependency is missing, an ImportError with a nice message will be raised.
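A recurring change throughout this patch, visible again in the `anthropic_tokenizer.py` hunk above, is the move from `typing` aliases to the modern spellings standardized by PEP 585 and PEP 604: `Dict` becomes `dict`, `List` becomes `list`, and `Optional[X]` becomes `X | None`. These files keep `from __future__ import annotations` at the top, which stores annotations as strings instead of evaluating them at import time, so the new spellings stay importable on interpreters older than Python 3.9/3.10 as long as they appear only in annotations. A minimal sketch of the idea (illustrative only; the function and names below are invented, not taken from this patch):

```python
# Hypothetical sketch: with this future import, PEP 585/604 spellings in
# annotations are never evaluated, so the module still imports on Python 3.7+.
from __future__ import annotations


def first_token(tokens: list[str] | None) -> str | None:
    """Return the first token, or None when the list is empty or missing."""
    if not tokens:  # handles both None and []
        return None
    return tokens[0]


print(first_token(["swarm", "agent"]))  # -> swarm
print(first_token(None))  # -> None
```

Note the limitation: the future import only defers annotations, so `list[str]` or `X | None` used in ordinary runtime expressions (an `isinstance` check, a variable assignment) still requires Python 3.9+ and 3.10+ respectively.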
diff --git a/swarms/tokenizers/base_tokenizer.py b/swarms/tokenizers/base_tokenizer.py index a14bbd09..fd1bc339 100644 --- a/swarms/tokenizers/base_tokenizer.py +++ b/swarms/tokenizers/base_tokenizer.py @@ -2,7 +2,6 @@ from __future__ import annotations from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import List, Union @dataclass @@ -20,12 +19,12 @@ class BaseTokenizer(ABC): stop_token: str = "<|Response|>" def __post_init__(self): - self.stop_sequences: List[str] = field( + self.stop_sequences: list[str] = field( default_factory=lambda: ["<|Response|>"], init=False, ) - def count_tokens_left(self, text: Union[str, List[dict]]) -> int: + def count_tokens_left(self, text: str | list[dict]) -> int: """ Counts the number of tokens left based on the given text. @@ -43,7 +42,7 @@ class BaseTokenizer(ABC): return 0 @abstractmethod - def count_tokens(self, text: Union[str, List[dict]]) -> int: + def count_tokens(self, text: str | list[dict]) -> int: """ Counts the number of tokens in the given text. diff --git a/swarms/tokenizers/cohere_tokenizer.py b/swarms/tokenizers/cohere_tokenizer.py index 7387c836..e6164f5b 100644 --- a/swarms/tokenizers/cohere_tokenizer.py +++ b/swarms/tokenizers/cohere_tokenizer.py @@ -1,6 +1,7 @@ from __future__ import annotations from dataclasses import dataclass + from cohere import Client diff --git a/swarms/tokenizers/openai_tokenizers.py b/swarms/tokenizers/openai_tokenizers.py index b77a8efd..9b02943b 100644 --- a/swarms/tokenizers/openai_tokenizers.py +++ b/swarms/tokenizers/openai_tokenizers.py @@ -1,9 +1,11 @@ from __future__ import annotations + import logging from dataclasses import dataclass, field + import tiktoken from tiktoken import Encoding -from typing import Optional + from swarms.tokenizers.base_tokenizer import BaseTokenizer @@ -39,7 +41,7 @@ class OpenAITokenizer(BaseTokenizer): Sets the default maximum number of tokens. """ self.max_tokens: int = field( - default_factory=lambda: self.default_max_tokens() + default_factory=self.default_max_tokens ) self.DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = ( @@ -102,7 +104,7 @@ class OpenAITokenizer(BaseTokenizer): ) - offset def count_tokens( - self, text: str | list[dict], model: Optional[str] = None + self, text: str | list[dict], model: str | None = None ) -> int: """ Counts the number of tokens in the given text. @@ -171,7 +173,7 @@ class OpenAITokenizer(BaseTokenizer): else: return len(self.encoding.encode(text)) - def len(self, text: str | list[dict], model: Optional[str]): + def len(self, text: str | list[dict], model: str | None): """ Returns the length of the text in tokens. If a model is provided, uses that model for encoding. 
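The `openai_tokenizers.py` hunk also swaps `default_factory=lambda: self.default_max_tokens()` for the bound method itself, since the lambda wrapper added nothing. For context, the code path being retyped here counts tokens with `tiktoken`; below is a standalone sketch of that path for plain strings, assuming Python 3.10+ for the `str | None` annotation. The `count_tokens` helper is illustrative, not the class's actual method body, which also handles `list[dict]` message payloads:

```python
import tiktoken


def count_tokens(text: str, model: str | None = None) -> int:
    """Roughly what OpenAITokenizer.count_tokens does for a plain string."""
    try:
        encoding = tiktoken.encoding_for_model(model or "gpt-4")
    except KeyError:
        # Unknown model names fall back to a default encoding.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))


print(count_tokens("Hello, swarm!"))  # prints the token count under the gpt-4 encoding
```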
diff --git a/swarms/tokenizers/r_tokenizers.py b/swarms/tokenizers/r_tokenizers.py index cf8253fc..f807b6ff 100644 --- a/swarms/tokenizers/r_tokenizers.py +++ b/swarms/tokenizers/r_tokenizers.py @@ -59,7 +59,7 @@ class SentencePieceTokenizer: def _maybe_add_prefix_space(self, tokens, decoded): """maybe add prefix space for incremental decoding.""" if ( - len(tokens) + tokens and not decoded.startswith(" ") and tokens[0] in self.prefix_space_tokens ): @@ -179,7 +179,7 @@ class HuggingFaceTokenizer: model_dir, "generation_config.json" ) if osp.exists(generation_config_file): - with open(generation_config_file, "r") as f: + with open(generation_config_file) as f: cfg = json.load(f) self.model.eos_token_id = cfg["eos_token_id"] elif hasattr(self.model, "eod_id"): # Qwen remote @@ -228,7 +228,7 @@ class HuggingFaceTokenizer: ): """maybe add prefix space for incremental decoding.""" if ( - len(tokens) + tokens and not decoded.startswith(" ") and tokens[0] in self.prefix_space_tokens ): @@ -300,7 +300,7 @@ class HuggingFaceTokenizer: encoded = self.model.encode(s, **kwargs) if not add_bos: # in the middle of a session - if len(encoded) and encoded[0] == self.bos_token_id: + if encoded and encoded[0] == self.bos_token_id: encoded = encoded[1:] return encoded diff --git a/swarms/tools/__init__.py b/swarms/tools/__init__.py index c36c9608..8d98894a 100644 --- a/swarms/tools/__init__.py +++ b/swarms/tools/__init__.py @@ -1,18 +1,18 @@ -from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs from swarms.tools.code_executor import CodeExecutor -from swarms.tools.tool_utils import ( - tool_find_by_name, - extract_tool_commands, - parse_and_execute_tools, - execute_tools, -) -from swarms.tools.tool import BaseTool, Tool, StructuredTool, tool from swarms.tools.exec_tool import ( AgentAction, - BaseAgentOutputParser, - preprocess_json_input, AgentOutputParser, + BaseAgentOutputParser, execute_tool_by_name, + preprocess_json_input, +) +from swarms.tools.tool import BaseTool, StructuredTool, Tool, tool +from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs +from swarms.tools.tool_utils import ( + execute_tools, + extract_tool_commands, + parse_and_execute_tools, + tool_find_by_name, ) __all__ = [ diff --git a/swarms/tools/code_executor.py b/swarms/tools/code_executor.py index 3c369dae..5a85f14d 100644 --- a/swarms/tools/code_executor.py +++ b/swarms/tools/code_executor.py @@ -1,6 +1,6 @@ import os -import tempfile import subprocess +import tempfile class CodeExecutor: diff --git a/swarms/tools/format_tools.py b/swarms/tools/format_tools.py index 2e620135..46565f76 100644 --- a/swarms/tools/format_tools.py +++ b/swarms/tools/format_tools.py @@ -1,13 +1,14 @@ -from typing import List, Union, Dict, Any +import json +from typing import Any, Dict, List, Union + +from termcolor import cprint +from transformers import PreTrainedModel, PreTrainedTokenizer from swarms.tools.logits_processor import ( NumberStoppingCriteria, OutputNumbersTokens, StringStoppingCriteria, ) -from termcolor import cprint -from transformers import PreTrainedModel, PreTrainedTokenizer -import json GENERATION_MARKER = "|GENERATION|" diff --git a/swarms/tools/logits_processor.py b/swarms/tools/logits_processor.py index ed7fef18..f67ff451 100644 --- a/swarms/tools/logits_processor.py +++ b/swarms/tools/logits_processor.py @@ -1,9 +1,9 @@ +import torch from transformers import ( - PreTrainedTokenizer, LogitsWarper, + PreTrainedTokenizer, StoppingCriteria, ) -import torch class StringStoppingCriteria(StoppingCriteria): 
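In the `r_tokenizers.py` hunks, `if len(tokens)` and `if len(encoded)` become bare truthiness checks; for Python sequences the two are equivalent, and the bare form is the conventional idiom this diff standardizes on. A small sketch of the BOS-stripping case, with `strip_bos` as a hypothetical standalone helper (the real change sits inside `HuggingFaceTokenizer.encode`):

```python
def strip_bos(encoded: list[int], bos_token_id: int) -> list[int]:
    """Drop a leading BOS token, mirroring the HuggingFaceTokenizer hunk."""
    # `if encoded` is equivalent to `if len(encoded) > 0` for a list,
    # which is exactly the rewrite applied above.
    if encoded and encoded[0] == bos_token_id:
        return encoded[1:]
    return encoded


assert strip_bos([1, 42, 7], bos_token_id=1) == [42, 7]
assert strip_bos([], bos_token_id=1) == []  # empty input is handled by truthiness
```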
diff --git a/swarms/tools/tool.py b/swarms/tools/tool.py index f0090493..53436614 100644 --- a/swarms/tools/tool.py +++ b/swarms/tools/tool.py @@ -1,4 +1,5 @@ """Base implementation for tools or skills.""" + from __future__ import annotations import asyncio @@ -7,17 +8,7 @@ import warnings from abc import abstractmethod from functools import partial from inspect import signature -from typing import ( - Any, - Awaitable, - Callable, - Dict, - List, - Optional, - Tuple, - Type, - Union, -) +from typing import Any, Awaitable, Callable, Dict, Union from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( @@ -27,8 +18,12 @@ from langchain.callbacks.manager import ( CallbackManagerForToolRun, Callbacks, ) - from langchain.load.serializable import Serializable +from langchain.schema.runnable import ( + Runnable, + RunnableConfig, + RunnableSerializable, +) from pydantic import ( BaseModel, Extra, @@ -37,11 +32,6 @@ from pydantic import ( root_validator, validate_arguments, ) -from langchain.schema.runnable import ( - Runnable, - RunnableConfig, - RunnableSerializable, -) class SchemaAnnotationError(TypeError): @@ -50,7 +40,7 @@ class SchemaAnnotationError(TypeError): def _create_subset_model( name: str, model: BaseModel, field_names: list -) -> Type[BaseModel]: +) -> type[BaseModel]: """Create a pydantic model with only a subset of model's fields.""" fields = {} for field_name in field_names: @@ -60,7 +50,7 @@ def _create_subset_model( def _get_filtered_args( - inferred_model: Type[BaseModel], + inferred_model: type[BaseModel], func: Callable, ) -> dict: """Get the arguments from a function's signature.""" @@ -83,7 +73,7 @@ class _SchemaConfig: def create_schema_from_function( model_name: str, func: Callable, -) -> Type[BaseModel]: +) -> type[BaseModel]: """Create a pydantic schema from a function's signature. Args: model_name: Name to assign to the generated pydandic schema @@ -114,8 +104,6 @@ class ToolException(Exception): to the agent as observation, and printed in red on the console. """ - pass - class BaseTool(RunnableSerializable[Union[str, Dict], Any]): """Interface swarms tools must implement.""" @@ -158,7 +146,7 @@ class ChildTool(BaseTool): You can provide few-shot examples as a part of the description. """ - args_schema: Optional[Type[BaseModel]] = None + args_schema: type[BaseModel] | None = None """Pydantic model class to validate and parse the tool's input arguments.""" return_direct: bool = False """Whether to return the tool's output directly. Setting this to True means @@ -170,26 +158,26 @@ class ChildTool(BaseTool): callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to be called during tool execution.""" - callback_manager: Optional[BaseCallbackManager] = Field( + callback_manager: BaseCallbackManager | None = Field( default=None, exclude=True ) """Deprecated. Please use callbacks instead.""" - tags: Optional[List[str]] = None + tags: list[str] | None = None """Optional list of tags associated with the tool. Defaults to None These tags will be associated with each call to this tool, and passed as arguments to the handlers defined in `callbacks`. You can use these to eg identify a specific instance of a tool with its use case. """ - metadata: Optional[Dict[str, Any]] = None + metadata: dict[str, Any] | None = None """Optional metadata associated with the tool. Defaults to None This metadata will be associated with each call to this tool, and passed as arguments to the handlers defined in `callbacks`. 
You can use these to eg identify a specific instance of a tool with its use case. """ - handle_tool_error: Optional[ - Union[bool, str, Callable[[ToolException], str]] - ] = False + handle_tool_error: ( + bool | str | Callable[[ToolException], str] | None + ) = False """Handle the content of the ToolException thrown.""" class Config(Serializable.Config): @@ -214,7 +202,7 @@ class ChildTool(BaseTool): # --- Runnable --- @property - def input_schema(self) -> Type[BaseModel]: + def input_schema(self) -> type[BaseModel]: """The tool's input schema.""" if self.args_schema is not None: return self.args_schema @@ -223,8 +211,8 @@ class ChildTool(BaseTool): def invoke( self, - input: Union[str, Dict], - config: Optional[RunnableConfig] = None, + input: str | dict, + config: RunnableConfig | None = None, **kwargs: Any, ) -> Any: config = config or {} @@ -239,8 +227,8 @@ class ChildTool(BaseTool): async def ainvoke( self, - input: Union[str, Dict], - config: Optional[RunnableConfig] = None, + input: str | dict, + config: RunnableConfig | None = None, **kwargs: Any, ) -> Any: config = config or {} @@ -257,8 +245,8 @@ class ChildTool(BaseTool): def _parse_input( self, - tool_input: Union[str, Dict], - ) -> Union[str, Dict[str, Any]]: + tool_input: str | dict, + ) -> str | dict[str, Any]: """Convert tool input to pydantic model.""" input_args = self.args_schema if isinstance(tool_input, str): @@ -277,7 +265,7 @@ class ChildTool(BaseTool): return tool_input @root_validator(skip_on_failure=True) - def raise_deprecation(cls, values: Dict) -> Dict: + def raise_deprecation(cls, values: dict) -> dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( @@ -319,8 +307,8 @@ class ChildTool(BaseTool): ) def _to_args_and_kwargs( - self, tool_input: Union[str, Dict] - ) -> Tuple[Tuple, Dict]: + self, tool_input: str | dict + ) -> tuple[tuple, dict]: # For backwards compatibility, if run_input is a string, # pass as a positional argument. 
if isinstance(tool_input, str): @@ -330,15 +318,15 @@ class ChildTool(BaseTool): def run( self, - tool_input: Union[str, Dict], - verbose: Optional[bool] = None, - start_color: Optional[str] = "green", - color: Optional[str] = "green", + tool_input: str | dict, + verbose: bool | None = None, + start_color: str | None = "green", + color: str | None = "green", callbacks: Callbacks = None, *, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - run_name: Optional[str] = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + run_name: str | None = None, **kwargs: Any, ) -> Any: """Run the tool.""" @@ -422,15 +410,15 @@ class ChildTool(BaseTool): async def arun( self, - tool_input: Union[str, Dict], - verbose: Optional[bool] = None, - start_color: Optional[str] = "green", - color: Optional[str] = "green", + tool_input: str | dict, + verbose: bool | None = None, + start_color: str | None = "green", + color: str | None = "green", callbacks: Callbacks = None, *, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - run_name: Optional[str] = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + run_name: str | None = None, **kwargs: Any, ) -> Any: """Run the tool asynchronously.""" @@ -523,16 +511,16 @@ class Tool(BaseTool): """Tool that takes in function or coroutine directly.""" description: str = "" - func: Optional[Callable[..., str]] + func: Callable[..., str] | None """The function to run when the tool is called.""" - coroutine: Optional[Callable[..., Awaitable[str]]] = None + coroutine: Callable[..., Awaitable[str]] | None = None """The asynchronous version of the function.""" # --- Runnable --- async def ainvoke( self, - input: Union[str, Dict], - config: Optional[RunnableConfig] = None, + input: str | dict, + config: RunnableConfig | None = None, **kwargs: Any, ) -> Any: if not self.coroutine: @@ -555,8 +543,8 @@ class Tool(BaseTool): return {"tool_input": {"type": "string"}} def _to_args_and_kwargs( - self, tool_input: Union[str, Dict] - ) -> Tuple[Tuple, Dict]: + self, tool_input: str | dict + ) -> tuple[tuple, dict]: """Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) # For backwards compatibility. 
The tool must be run with a single input @@ -571,7 +559,7 @@ class Tool(BaseTool): def _run( self, *args: Any, - run_manager: Optional[CallbackManagerForToolRun] = None, + run_manager: CallbackManagerForToolRun | None = None, **kwargs: Any, ) -> Any: """Use the tool.""" @@ -597,7 +585,7 @@ class Tool(BaseTool): async def _arun( self, *args: Any, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, + run_manager: AsyncCallbackManagerForToolRun | None = None, **kwargs: Any, ) -> Any: """Use the tool asynchronously.""" @@ -629,26 +617,25 @@ class Tool(BaseTool): def __init__( self, name: str, - func: Optional[Callable], + func: Callable | None, description: str, **kwargs: Any, ) -> None: """Initialize tool.""" - super(Tool, self).__init__( + super().__init__( name=name, func=func, description=description, **kwargs ) @classmethod def from_function( cls, - func: Optional[Callable], + func: Callable | None, name: str, # We keep these required to support backwards compatibility description: str, return_direct: bool = False, - args_schema: Optional[Type[BaseModel]] = None, - coroutine: Optional[ - Callable[..., Awaitable[Any]] - ] = None, # This is last for compatibility, but should be after func + args_schema: type[BaseModel] | None = None, + coroutine: (Callable[..., Awaitable[Any]]) + | None = None, # This is last for compatibility, but should be after func **kwargs: Any, ) -> Tool: """Initialize tool from a function.""" @@ -671,20 +658,20 @@ class StructuredTool(BaseTool): """Tool that can operate on any number of inputs.""" description: str = "" - args_schema: Type[BaseModel] = Field( + args_schema: type[BaseModel] = Field( ..., description="The tool schema." ) """The input arguments' schema.""" - func: Optional[Callable[..., Any]] + func: Callable[..., Any] | None """The function to run when the tool is called.""" - coroutine: Optional[Callable[..., Awaitable[Any]]] = None + coroutine: Callable[..., Awaitable[Any]] | None = None """The asynchronous version of the function.""" # --- Runnable --- async def ainvoke( self, - input: Union[str, Dict], - config: Optional[RunnableConfig] = None, + input: str | dict, + config: RunnableConfig | None = None, **kwargs: Any, ) -> Any: if not self.coroutine: @@ -705,7 +692,7 @@ class StructuredTool(BaseTool): def _run( self, *args: Any, - run_manager: Optional[CallbackManagerForToolRun] = None, + run_manager: CallbackManagerForToolRun | None = None, **kwargs: Any, ) -> Any: """Use the tool.""" @@ -731,7 +718,7 @@ class StructuredTool(BaseTool): async def _arun( self, *args: Any, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, + run_manager: AsyncCallbackManagerForToolRun | None = None, **kwargs: Any, ) -> str: """Use the tool asynchronously.""" @@ -761,12 +748,12 @@ class StructuredTool(BaseTool): @classmethod def from_function( cls, - func: Optional[Callable] = None, - coroutine: Optional[Callable[..., Awaitable[Any]]] = None, - name: Optional[str] = None, - description: Optional[str] = None, + func: Callable | None = None, + coroutine: Callable[..., Awaitable[Any]] | None = None, + name: str | None = None, + description: str | None = None, return_direct: bool = False, - args_schema: Optional[Type[BaseModel]] = None, + args_schema: type[BaseModel] | None = None, infer_schema: bool = True, **kwargs: Any, ) -> StructuredTool: @@ -835,9 +822,9 @@ class StructuredTool(BaseTool): def tool( - *args: Union[str, Callable, Runnable], + *args: str | Callable | Runnable, return_direct: bool = False, - args_schema: 
Optional[Type[BaseModel]] = None, + args_schema: type[BaseModel] | None = None, infer_schema: bool = True, ) -> Callable: """Make tools out of functions, can be used with or without arguments. @@ -863,6 +850,7 @@ def tool( # Searches the API for the query. return + @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. @@ -870,9 +858,7 @@ def tool( """ def _make_with_name(tool_name: str) -> Callable: - def _make_tool( - dec_func: Union[Callable, Runnable] - ) -> BaseTool: + def _make_tool(dec_func: Callable | Runnable) -> BaseTool: if isinstance(dec_func, Runnable): runnable = dec_func @@ -885,7 +871,7 @@ def tool( ) async def ainvoke_wrapper( - callbacks: Optional[Callbacks] = None, + callbacks: Callbacks | None = None, **kwargs: Any, ) -> Any: return await runnable.ainvoke( @@ -893,7 +879,7 @@ def tool( ) def invoke_wrapper( - callbacks: Optional[Callbacks] = None, + callbacks: Callbacks | None = None, **kwargs: Any, ) -> Any: return runnable.invoke( @@ -902,9 +888,7 @@ def tool( coroutine = ainvoke_wrapper func = invoke_wrapper - schema: Optional[Type[BaseModel]] = ( - runnable.input_schema - ) + schema: type[BaseModel] | None = runnable.input_schema description = repr(runnable) elif inspect.iscoroutinefunction(dec_func): coroutine = dec_func diff --git a/swarms/tools/tool_func_doc_scraper.py b/swarms/tools/tool_func_doc_scraper.py index d233bfae..fccfc6a1 100644 --- a/swarms/tools/tool_func_doc_scraper.py +++ b/swarms/tools/tool_func_doc_scraper.py @@ -1,5 +1,6 @@ import inspect from typing import Callable + from termcolor import colored diff --git a/swarms/tools/tool_utils.py b/swarms/tools/tool_utils.py index a5a4e47c..ee6b6391 100644 --- a/swarms/tools/tool_utils.py +++ b/swarms/tools/tool_utils.py @@ -2,9 +2,7 @@ import json import re from typing import Any, List -from swarms.prompts.tools import ( - SCENARIOS, -) +from swarms.prompts.tools import SCENARIOS from swarms.tools.tool import BaseTool from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs diff --git a/swarms/utils/README.md b/swarms/utils/README.md index 8934d6f2..df8b294e 100644 --- a/swarms/utils/README.md +++ b/swarms/utils/README.md @@ -100,13 +100,16 @@ Here's the pseudocode algorithm for a `WorkerNode` class that includes a vector In Python, this could look something like: ```python -from langchain.vectorstores import FAISS +from collections import deque +from typing import Any, Dict + +import faiss from langchain.docstore import InMemoryDocstore from langchain.embeddings import OpenAIEmbeddings -import faiss +from langchain.vectorstores import FAISS + from swarms.workers.auto_agent import AutoGPT -from collections import deque -from typing import Dict, Any + class WorkerNode: def __init__(self, llm: AutoGPT, vectorstore: FAISS): @@ -118,20 +121,24 @@ class WorkerNode: def receive_task(self, task): self.task_queue.append(task) - self.task_status[task] = 'pending' + self.task_status[task] = "pending" def complete_task(self): task = self.task_queue.popleft() result = self.llm.run(task) self.completed_tasks.append(result) - self.task_status[task] = 'completed' + self.task_status[task] = "completed" # Insert task result into the vectorstore self.vectorstore.insert(task, result) return result def communicate(self): # Share task results and status through vectorstore - completed_tasks = [(task, self.task_status[task]) for task in self.task_queue if self.task_status[task] == 'completed'] + completed_tasks = [ + (task, self.task_status[task]) + for task in 
self.task_queue + if self.task_status[task] == "completed" + ] for task, status in completed_tasks: self.vectorstore.insert(task, status) ``` diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py index 220bc3e6..de16f1ef 100644 --- a/swarms/utils/__init__.py +++ b/swarms/utils/__init__.py @@ -1,7 +1,19 @@ from swarms.utils.class_args_wrapper import print_class_parameters from swarms.utils.code_interpreter import SubprocessCodeInterpreter +from swarms.utils.data_to_text import ( + csv_to_text, + data_to_text, + json_to_text, + txt_to_text, +) from swarms.utils.device_checker_cuda import check_device +from swarms.utils.download_img import download_img_from_url +from swarms.utils.download_weights_from_url import ( + download_weights_from_url, +) +from swarms.utils.exponential_backoff import ExponentialBackoffMixin from swarms.utils.find_img_path import find_image_path +from swarms.utils.json_output_parser import JsonOutputParser from swarms.utils.llm_metrics_decorator import metrics_decorator from swarms.utils.load_model_torch import load_model_torch from swarms.utils.markdown_message import display_markdown_message @@ -11,38 +23,29 @@ from swarms.utils.pdf_to_text import pdf_to_text from swarms.utils.prep_torch_model_inference import ( prep_torch_inference, ) -from swarms.utils.token_count_tiktoken import limit_tokens_from_string -from swarms.utils.data_to_text import ( - csv_to_text, - json_to_text, - txt_to_text, - data_to_text, -) -from swarms.utils.try_except_wrapper import try_except_wrapper -from swarms.utils.download_weights_from_url import ( - download_weights_from_url, -) -from swarms.utils.save_logs import parse_log_file - - -######## -from swarms.utils.yaml_output_parser import YamlOutputParser -from swarms.utils.json_output_parser import JsonOutputParser from swarms.utils.remove_json_whitespace import ( remove_whitespace_from_json, remove_whitespace_from_yaml, ) -from swarms.utils.exponential_backoff import ExponentialBackoffMixin -from swarms.utils.download_img import download_img_from_url +from swarms.utils.save_logs import parse_log_file from swarms.utils.supervision_masking import ( FeatureType, compute_mask_iou_vectorized, - mask_non_max_suppression, filter_masks_by_relative_area, + mask_non_max_suppression, masks_to_marks, refine_marks, ) from swarms.utils.supervision_visualizer import MarkVisualizer +from swarms.utils.token_count_tiktoken import limit_tokens_from_string +from swarms.utils.try_except_wrapper import try_except_wrapper +from swarms.utils.video_to_frames import ( + save_frames_as_images, + video_to_frames, +) + +######## +from swarms.utils.yaml_output_parser import YamlOutputParser __all__ = [ "SubprocessCodeInterpreter", @@ -77,4 +80,6 @@ __all__ = [ "masks_to_marks", "refine_marks", "MarkVisualizer", + "video_to_frames", + "save_frames_as_images", ] diff --git a/swarms/utils/apa.py b/swarms/utils/apa.py index fa73b7b4..05b25c5c 100644 --- a/swarms/utils/apa.py +++ b/swarms/utils/apa.py @@ -1,8 +1,8 @@ -from enum import Enum, unique, auto import abc -from typing import List, Optional import json from dataclasses import dataclass, field +from enum import Enum, auto, unique +from typing import List, Optional @unique @@ -146,9 +146,7 @@ class Singleton(abc.ABCMeta, type): def __call__(cls, *args, **kwargs): """Call method for the singleton metaclass.""" if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__( - *args, **kwargs - ) + cls._instances[cls] = super().__call__(*args, **kwargs) return cls._instances[cls] diff 
--git a/swarms/utils/check_function_result.py b/swarms/utils/check_function_result.py index 0da3ad91..b3c88491 100644 --- a/swarms/utils/check_function_result.py +++ b/swarms/utils/check_function_result.py @@ -1,25 +1,25 @@ -import signal -import platform -import os -import multiprocessing -import tempfile import contextlib -from typing import Dict, Optional import faulthandler import io +import multiprocessing +import os +import platform +import signal +import tempfile +from typing import Dict, Optional class WriteOnlyStringIO(io.StringIO): """StringIO that throws an exception when it's read from""" def read(self, *args, **kwargs): - raise IOError + raise OSError def readline(self, *args, **kwargs): - raise IOError + raise OSError def readlines(self, *args, **kwargs): - raise IOError + raise OSError def readable(self, *args, **kwargs): """Returns True if the IO object can be read.""" diff --git a/swarms/utils/code_interpreter.py b/swarms/utils/code_interpreter.py index 9e27b668..e3850250 100644 --- a/swarms/utils/code_interpreter.py +++ b/swarms/utils/code_interpreter.py @@ -1,6 +1,6 @@ +import queue import subprocess import threading -import queue import time import traceback diff --git a/swarms/utils/data_to_text.py b/swarms/utils/data_to_text.py index 55f6e6cd..d8d72986 100644 --- a/swarms/utils/data_to_text.py +++ b/swarms/utils/data_to_text.py @@ -1,24 +1,25 @@ -import os import csv import json +import os + from swarms.utils.pdf_to_text import pdf_to_text def csv_to_text(file): - with open(file, "r") as file: + with open(file) as file: reader = csv.reader(file) data = list(reader) return str(data) def json_to_text(file): - with open(file, "r") as file: + with open(file) as file: data = json.load(file) return json.dumps(data) def txt_to_text(file): - with open(file, "r") as file: + with open(file) as file: data = file.read() return data @@ -28,7 +29,7 @@ def md_to_text(file): raise FileNotFoundError( f"No such file or directory: '{file}'" ) - with open(file, "r") as file: + with open(file) as file: data = file.read() return data @@ -71,8 +72,8 @@ def data_to_text(file): elif ext == ".md": return md_to_text(file) else: - with open(file, "r") as file: + with open(file) as file: data = file.read() return data except Exception as e: - raise IOError(f"Error reading file: {file}") from e + raise OSError(f"Error reading file: {file}") from e diff --git a/swarms/utils/device_checker_cuda.py b/swarms/utils/device_checker_cuda.py index dbf2191c..11b4559c 100644 --- a/swarms/utils/device_checker_cuda.py +++ b/swarms/utils/device_checker_cuda.py @@ -1,6 +1,7 @@ -import torch import logging -from typing import Union, List, Any +from typing import Any, List, Union + +import torch from torch.cuda import memory_allocated, memory_reserved diff --git a/swarms/utils/disable_logging.py b/swarms/utils/disable_logging.py index 7f04555f..368c85bf 100644 --- a/swarms/utils/disable_logging.py +++ b/swarms/utils/disable_logging.py @@ -1,11 +1,7 @@ import logging import os -import warnings import sys -import logging -import os import warnings -import sys def disable_logging(): diff --git a/swarms/utils/dist_utils.py b/swarms/utils/dist_utils.py index 76d9e03c..f255d506 100644 --- a/swarms/utils/dist_utils.py +++ b/swarms/utils/dist_utils.py @@ -11,7 +11,6 @@ from torch.distributed._tensor import ( ) from zeta.nn import QuantizedLN - try: from peft.tuners.lora import Linear as LoRALinear except ImportError: diff --git a/swarms/utils/download_img.py b/swarms/utils/download_img.py index 7791a80e..301e1ef6 100644 
--- a/swarms/utils/download_img.py +++ b/swarms/utils/download_img.py @@ -1,4 +1,5 @@ from io import BytesIO + import requests from PIL import Image @@ -26,6 +27,6 @@ def download_img_from_url(url: str): print("Image downloaded successfully.") except requests.exceptions.RequestException as e: - raise IOError("Error while downloading the image.") from e - except IOError as e: - raise IOError("Error while saving the image.") from e + raise OSError("Error while downloading the image.") from e + except OSError as e: + raise OSError("Error while saving the image.") from e diff --git a/swarms/utils/execute_futures.py b/swarms/utils/execute_futures.py index bc2d47ef..13d9518e 100644 --- a/swarms/utils/execute_futures.py +++ b/swarms/utils/execute_futures.py @@ -1,12 +1,12 @@ from concurrent import futures from concurrent.futures import Future -from typing import TypeVar, Dict +from typing import Dict, TypeVar T = TypeVar("T") def execute_futures_dict( - fs_dict: Dict[str, Future[T]] + fs_dict: Dict[str, Future[T]], ) -> Dict[str, T]: """Execute a dictionary of futures and return the results. diff --git a/swarms/utils/exponential_backoff.py b/swarms/utils/exponential_backoff.py index 2238064a..cd00016c 100644 --- a/swarms/utils/exponential_backoff.py +++ b/swarms/utils/exponential_backoff.py @@ -1,7 +1,8 @@ import logging from abc import ABC from dataclasses import dataclass -from tenacity import Retrying, wait_exponential, stop_after_attempt + +from tenacity import Retrying, stop_after_attempt, wait_exponential @dataclass diff --git a/swarms/utils/hash.py b/swarms/utils/hash.py index 725cc6ba..0e82766b 100644 --- a/swarms/utils/hash.py +++ b/swarms/utils/hash.py @@ -1,6 +1,7 @@ -import pandas as pd import hashlib +import pandas as pd + def dataframe_to_hash(dataframe: pd.DataFrame) -> str: return hashlib.sha256( diff --git a/swarms/utils/json_output_parser.py b/swarms/utils/json_output_parser.py index 724d5ed5..4f76c3a5 100644 --- a/swarms/utils/json_output_parser.py +++ b/swarms/utils/json_output_parser.py @@ -1,6 +1,7 @@ import json import re from typing import Type, TypeVar + from pydantic import BaseModel, ValidationError T = TypeVar("T", bound=BaseModel) diff --git a/swarms/utils/jsonl_utils.py b/swarms/utils/jsonl_utils.py index 6f52caf5..95a0d9d6 100644 --- a/swarms/utils/jsonl_utils.py +++ b/swarms/utils/jsonl_utils.py @@ -1,8 +1,7 @@ -from typing import Iterable, Dict import gzip import json import os - +from typing import Dict, Iterable ROOT = os.path.dirname(os.path.abspath(__file__)) @@ -29,7 +28,7 @@ def stream_jsonl(filename: str) -> Iterable[Dict]: yield json.loads(line) else: - with open(filename, "r") as fp: + with open(filename) as fp: for line in fp: if any(not x.isspace() for x in line): yield json.loads(line) diff --git a/swarms/utils/loggers.py b/swarms/utils/loggers.py index 68477132..7ec3fcd2 100644 --- a/swarms/utils/loggers.py +++ b/swarms/utils/loggers.py @@ -1,14 +1,16 @@ """Logging modules""" + +import json import logging import os import random import re import time -import json from logging import LogRecord from typing import Any from colorama import Fore, Style + from swarms.utils.apa import Action, ToolCallStatus diff --git a/swarms/utils/prep_torch_model_inference.py b/swarms/utils/prep_torch_model_inference.py index 41bc07cc..1b88cab5 100644 --- a/swarms/utils/prep_torch_model_inference.py +++ b/swarms/utils/prep_torch_model_inference.py @@ -1,4 +1,5 @@ import torch + from swarms.utils.load_model_torch import load_model_torch diff --git 
a/swarms/utils/remove_json_whitespace.py b/swarms/utils/remove_json_whitespace.py index a5b3f7de..0a043e7c 100644 --- a/swarms/utils/remove_json_whitespace.py +++ b/swarms/utils/remove_json_whitespace.py @@ -1,4 +1,5 @@ import json + import yaml diff --git a/swarms/utils/save_logs.py b/swarms/utils/save_logs.py index c8193905..dd8810b1 100644 --- a/swarms/utils/save_logs.py +++ b/swarms/utils/save_logs.py @@ -25,7 +25,7 @@ def parse_log_file(filename: str): log_entries = [] - with open(filename, "r") as file: + with open(filename) as file: for line in file: parts = line.split(" - ") # Check if the log entry has the correct format diff --git a/swarms/utils/serializable.py b/swarms/utils/serializable.py index de9444ef..9e85e783 100644 --- a/swarms/utils/serializable.py +++ b/swarms/utils/serializable.py @@ -54,7 +54,7 @@ class Serializable(BaseModel, ABC): Return a map of constructor argument names to secret ids. eg. {"openai_api_key": "OPENAI_API_KEY"} """ - return dict() + return {} @property def lc_attributes(self) -> Dict: @@ -80,7 +80,7 @@ class Serializable(BaseModel, ABC): if not self.lc_serializable: return self.to_json_not_implemented() - secrets = dict() + secrets = {} # Get latest values for kwargs if there is an attribute with same name lc_kwargs = { k: getattr(self, k, v) diff --git a/swarms/utils/video_to_frames.py b/swarms/utils/video_to_frames.py new file mode 100644 index 00000000..ae16610c --- /dev/null +++ b/swarms/utils/video_to_frames.py @@ -0,0 +1,42 @@ +import cv2 +from typing import List + + +def video_to_frames(video_file: str) -> List: + """ + Convert a video into frames. + + Args: + video_file (str): The path to the video file. + + Returns: + List[np.array]: A list of frames from the video. + """ + # Open the video file + vidcap = cv2.VideoCapture(video_file) + + frames = [] + success, image = vidcap.read() + + while success: + frames.append(image) + success, image = vidcap.read() + + return frames + + +def save_frames_as_images(frames, output_dir) -> None: + """ + Save a list of frames as image files. + + Args: + frames (list of np.array): The list of frames. + output_dir (str): The directory where the images will be saved. 
+ """ + for i, frame in enumerate(frames): + cv2.imwrite(f"{output_dir}/frame{i}.jpg", frame) + + +# out = save_frames_as_images(frames, "playground/demos/security_team/frames") + +# print(out) diff --git a/swarms/utils/yaml_output_parser.py b/swarms/utils/yaml_output_parser.py index 61be311b..5832bf16 100644 --- a/swarms/utils/yaml_output_parser.py +++ b/swarms/utils/yaml_output_parser.py @@ -1,7 +1,8 @@ import json import re -import yaml from typing import Type, TypeVar + +import yaml from pydantic import BaseModel, ValidationError T = TypeVar("T", bound=BaseModel) diff --git a/tests/README.md b/tests/README.md index dc527d9f..617f0a8a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -11,27 +11,31 @@ In Python, this would look something like: ```python import pytest + def test_WorkerNode_create_agent(): # assuming llm, tools, and vectorstore are initialized properly worker_node = WorkerNode(llm, tools, vectorstore) - worker_node.create_agent('test_agent', 'test_role', False, {}) + worker_node.create_agent("test_agent", "test_role", False, {}) assert worker_node.agent is not None assert worker_node.agent.chain.verbose + def test_WorkerNode_run_agent(): worker_node = WorkerNode(llm, tools, vectorstore) - worker_node.create_agent('test_agent', 'test_role', False, {}) - worker_node.run_agent('test prompt') # check it runs without error + worker_node.create_agent("test_agent", "test_role", False, {}) + worker_node.run_agent("test prompt") # check it runs without error + def test_BossNode_create_task(): # assuming llm, vectorstore, task_execution_chain are initialized properly boss_node = BossNode(llm, vectorstore, task_execution_chain, False, 3) - task = boss_node.create_task('test task') - assert task == {'objective': 'test task'} + task = boss_node.create_task("test task") + assert task == {"objective": "test task"} + def test_BossNode_execute_task(): boss_node = BossNode(llm, vectorstore, task_execution_chain, False, 3) - task = boss_node.create_task('test task') + task = boss_node.create_task("test task") boss_node.execute_task(task) # check it runs without error ``` @@ -51,21 +55,23 @@ Here is an example of what these tests could look like: ```python def test_WorkerNode_tools(): worker_node = WorkerNode(llm, tools, vectorstore) - worker_node.create_agent('test_agent', 'test_role', False, {}) - + worker_node.create_agent("test_agent", "test_role", False, {}) + # Check that all tools are instantiated for tool in worker_node.tools: assert tool is not None + def test_BossNode_AgentExecutor(): boss_node = BossNode(llm, vectorstore, task_execution_chain, False, 3) - + # Check that the AgentExecutor is correctly initialized assert boss_node.baby_agi.task_execution_chain is not None + def test_BossNode_LLMChain(): boss_node = BossNode(llm, vectorstore, task_execution_chain, False, 3) - + # Check that the LLMChain in ZeroShotAgent is working assert boss_node.baby_agi.task_execution_chain.agent.llm_chain is not None ``` diff --git a/tests/memory/test_dictinternalmemory.py b/tests/memory/test_dictinternalmemory.py index 3265ae50..7658eb7c 100644 --- a/tests/memory/test_dictinternalmemory.py +++ b/tests/memory/test_dictinternalmemory.py @@ -1,8 +1,10 @@ # DictInternalMemory +from uuid import uuid4 + import pytest + from swarms.memory import DictInternalMemory -from uuid import uuid4 # Example of an extensive suite of tests for DictInternalMemory. 
diff --git a/tests/memory/test_dictsharedmemory.py b/tests/memory/test_dictsharedmemory.py index 8537d6b8..a41ccd8f 100644 --- a/tests/memory/test_dictsharedmemory.py +++ b/tests/memory/test_dictsharedmemory.py @@ -1,6 +1,8 @@ import os import tempfile + import pytest + from swarms.memory import DictSharedMemory # Utility functions or fixtures might come first diff --git a/tests/memory/test_langchainchromavectormemory.py b/tests/memory/test_langchainchromavectormemory.py index dc41fa8a..ee882c6c 100644 --- a/tests/memory/test_langchainchromavectormemory.py +++ b/tests/memory/test_langchainchromavectormemory.py @@ -1,8 +1,10 @@ # LangchainChromaVectorMemory +from unittest.mock import MagicMock, patch + import pytest + from swarms.memory import LangchainChromaVectorMemory -from unittest.mock import MagicMock, patch # Fixtures for setting up the memory and mocks diff --git a/tests/memory/test_pinecone.py b/tests/memory/test_pinecone.py index f385f058..a7d4fcea 100644 --- a/tests/memory/test_pinecone.py +++ b/tests/memory/test_pinecone.py @@ -1,5 +1,6 @@ import os from unittest.mock import patch + from swarms.memory.pinecone import PineconeDB api_key = os.getenv("PINECONE_API_KEY") or "" diff --git a/tests/memory/test_qdrant.py b/tests/memory/test_qdrant.py index eb9bfef6..5f82814c 100644 --- a/tests/memory/test_qdrant.py +++ b/tests/memory/test_qdrant.py @@ -1,6 +1,7 @@ -import pytest from unittest.mock import Mock, patch +import pytest + from swarms.memory.qdrant import Qdrant diff --git a/tests/memory/test_short_term_memory.py b/tests/memory/test_short_term_memory.py index 0b66b749..132da5f6 100644 --- a/tests/memory/test_short_term_memory.py +++ b/tests/memory/test_short_term_memory.py @@ -1,6 +1,7 @@ -from swarms.memory.short_term_memory import ShortTermMemory import threading +from swarms.memory.short_term_memory import ShortTermMemory + def test_init(): memory = ShortTermMemory() diff --git a/tests/memory/test_sqlite.py b/tests/memory/test_sqlite.py index 6b4213b0..49d61ef7 100644 --- a/tests/memory/test_sqlite.py +++ b/tests/memory/test_sqlite.py @@ -1,5 +1,7 @@ -import pytest import sqlite3 + +import pytest + from swarms.memory.sqlite import SQLiteDB diff --git a/tests/memory/test_weaviate.py b/tests/memory/test_weaviate.py index f9e61c8f..d1a69da0 100644 --- a/tests/memory/test_weaviate.py +++ b/tests/memory/test_weaviate.py @@ -1,5 +1,7 @@ -import pytest from unittest.mock import Mock, patch + +import pytest + from swarms.memory import WeaviateDB diff --git a/tests/models/test_biogpt.py b/tests/models/test_biogpt.py index 38be125d..e6093729 100644 --- a/tests/models/test_biogpt.py +++ b/tests/models/test_biogpt.py @@ -9,9 +9,7 @@ from transformers import BioGptForCausalLM, BioGptTokenizer # Fixture for BioGPT instance @pytest.fixture def biogpt_instance(): - from swarms.models import ( - BioGPT, - ) + from swarms.models import BioGPT return BioGPT() @@ -20,28 +18,32 @@ def biogpt_instance(): def test_biomedical_response_1(biogpt_instance): question = "What are the functions of the mitochondria?" response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 37. Test for a genetics-based question def test_genetics_response(biogpt_instance): question = "Can you explain the Mendelian inheritance?" response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 38. 
Test for a question about viruses def test_virus_response(biogpt_instance): question = "How do RNA viruses replicate?" response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 39. Test for a cell biology related question def test_cell_biology_response(biogpt_instance): question = "Describe the cell cycle and its phases." response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 40. Test for a question about protein structure @@ -51,28 +53,32 @@ def test_protein_structure_response(biogpt_instance): " structures in proteins?" ) response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 41. Test for a pharmacology question def test_pharmacology_response(biogpt_instance): question = "How do beta blockers work?" response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 42. Test for an anatomy-based question def test_anatomy_response(biogpt_instance): question = "Describe the structure of the human heart." response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 43. Test for a question about bioinformatics def test_bioinformatics_response(biogpt_instance): question = "What is a BLAST search?" response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 44. Test for a neuroscience question @@ -81,14 +87,16 @@ def test_neuroscience_response(biogpt_instance): "Explain the function of synapses in the nervous system." ) response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) # 45. Test for an immunology question def test_immunology_response(biogpt_instance): question = "What is the role of T cells in the immune response?" 
response = biogpt_instance(question) - assert response and isinstance(response, str) + assert response + assert isinstance(response, str) def test_init(bio_gpt): diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index 5e6fc948..8a1147d3 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -3,6 +3,7 @@ from unittest.mock import Mock, patch import pytest from dotenv import load_dotenv + from swarms.models.cohere_chat import BaseCohere, Cohere # Load the environment variables diff --git a/tests/models/test_elevenlab.py b/tests/models/test_elevenlab.py index 0ba975ca..da41ca53 100644 --- a/tests/models/test_elevenlab.py +++ b/tests/models/test_elevenlab.py @@ -1,11 +1,13 @@ +import os +from unittest.mock import mock_open, patch + import pytest -from unittest.mock import patch, mock_open +from dotenv import load_dotenv + from swarms.models.eleven_labs import ( - ElevenLabsText2SpeechTool, ElevenLabsModel, + ElevenLabsText2SpeechTool, ) -import os -from dotenv import load_dotenv load_dotenv() diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index 2a1d4ad4..a61d1676 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -1,5 +1,7 @@ +from unittest.mock import Mock, patch + import pytest -from unittest.mock import patch, Mock + from swarms.models.gemini import Gemini diff --git a/tests/models/test_gpt4_vision_api.py b/tests/models/test_gpt4_vision_api.py index 26f60960..ac797280 100644 --- a/tests/models/test_gpt4_vision_api.py +++ b/tests/models/test_gpt4_vision_api.py @@ -1,8 +1,9 @@ import asyncio import os from unittest.mock import AsyncMock, Mock, mock_open, patch -from aiohttp import ClientResponseError + import pytest +from aiohttp import ClientResponseError from dotenv import load_dotenv from requests.exceptions import RequestException diff --git a/tests/models/test_hf.py b/tests/models/test_hf.py index 48dcd008..cbbba940 100644 --- a/tests/models/test_hf.py +++ b/tests/models/test_hf.py @@ -1,8 +1,8 @@ -import torch import logging from unittest.mock import patch import pytest +import torch from swarms.models.huggingface import HuggingfaceLLM @@ -83,18 +83,12 @@ def test_load_model(mock_huggingface_llm): llm = HuggingfaceLLM(model_id="test_model") llm.load_model() - # Ensure that the load_model function is called - assert True - # Test running the model def test_run(mock_huggingface_llm): llm = HuggingfaceLLM(model_id="test_model") llm.run("Test prompt") - # Ensure that the run function is called - assert True - # Test for setting max_length def test_llm_set_max_length(llm_instance): diff --git a/tests/models/test_idefics.py b/tests/models/test_idefics.py index 25a8dd5b..3bfee679 100644 --- a/tests/models/test_idefics.py +++ b/tests/models/test_idefics.py @@ -1,10 +1,12 @@ -import pytest from unittest.mock import patch + +import pytest import torch + from swarms.models.idefics import ( + AutoProcessor, Idefics, IdeficsForVisionText2Text, - AutoProcessor, ) diff --git a/tests/models/test_jina_embeds.py b/tests/models/test_jina_embeds.py index dd102d7c..0f59e477 100644 --- a/tests/models/test_jina_embeds.py +++ b/tests/models/test_jina_embeds.py @@ -1,5 +1,6 @@ import pytest import torch + from swarms.models.jina_embeds import JinaEmbeddings @@ -61,7 +62,8 @@ def test_cosine_similarity(model): embeddings2 = model.run(task2) sim = model.cos_sim(embeddings1, embeddings2) assert isinstance(sim, torch.Tensor) - assert sim.item() >= -1.0 and sim.item() <= 1.0 + assert sim.item() >= -1.0 + assert sim.item() 
<= 1.0 def test_failed_load_model(caplog): diff --git a/tests/models/test_llama_function_caller.py b/tests/models/test_llama_function_caller.py index 56ad481d..1e9df654 100644 --- a/tests/models/test_llama_function_caller.py +++ b/tests/models/test_llama_function_caller.py @@ -1,4 +1,5 @@ import pytest + from swarms.models.llama_function_caller import LlamaFunctionCaller diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 10b47810..432c02c1 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -1,4 +1,5 @@ from unittest.mock import patch + from swarms.models.mistral import Mistral diff --git a/tests/models/test_mixtral.py b/tests/models/test_mixtral.py index 9eb31af0..a68a9026 100644 --- a/tests/models/test_mixtral.py +++ b/tests/models/test_mixtral.py @@ -1,5 +1,7 @@ +from unittest.mock import MagicMock, patch + import pytest -from unittest.mock import patch, MagicMock + from swarms.models.mixtral import Mixtral diff --git a/tests/models/test_open_dalle.py b/tests/models/test_open_dalle.py index 2483d705..4ff14e10 100644 --- a/tests/models/test_open_dalle.py +++ b/tests/models/test_open_dalle.py @@ -1,5 +1,6 @@ import pytest import torch + from swarms.models.open_dalle import OpenDalle diff --git a/tests/models/test_openaitts.py b/tests/models/test_openaitts.py index b6a4a7ff..42745284 100644 --- a/tests/models/test_openaitts.py +++ b/tests/models/test_openaitts.py @@ -1,5 +1,7 @@ +from unittest.mock import MagicMock, patch + import pytest -from unittest.mock import patch, MagicMock + from swarms.models.openai_tts import OpenAITTS diff --git a/tests/models/test_qwen.py b/tests/models/test_qwen.py index 28178fc0..a920256c 100644 --- a/tests/models/test_qwen.py +++ b/tests/models/test_qwen.py @@ -1,4 +1,5 @@ from unittest.mock import Mock, patch + from swarms.models.qwen import QwenVLMultiModal diff --git a/tests/models/test_speech_t5.py b/tests/models/test_speech_t5.py index a33272fc..d32c21db 100644 --- a/tests/models/test_speech_t5.py +++ b/tests/models/test_speech_t5.py @@ -1,6 +1,8 @@ -import pytest import os + +import pytest import torch + from swarms.models.speecht5 import SpeechT5 diff --git a/tests/models/test_ssd_1b.py b/tests/models/test_ssd_1b.py index 39e4264e..f658f853 100644 --- a/tests/models/test_ssd_1b.py +++ b/tests/models/test_ssd_1b.py @@ -1,7 +1,8 @@ import pytest -from swarms.models.ssd_1b import SSD1B from PIL import Image +from swarms.models.ssd_1b import SSD1B + # Create fixtures if needed @pytest.fixture diff --git a/tests/models/test_timm.py b/tests/models/test_timm.py index fae5f704..4af689e5 100644 --- a/tests/models/test_timm.py +++ b/tests/models/test_timm.py @@ -1,7 +1,9 @@ from unittest.mock import patch -from swarms.models import TimmModel + import torch +from swarms.models import TimmModel + def test_timm_model_init(): with patch("swarms.models.timm.list_models") as mock_list_models: diff --git a/tests/models/test_timm_model.py b/tests/models/test_timm_model.py index 0ced344e..b2f8f6c9 100644 --- a/tests/models/test_timm_model.py +++ b/tests/models/test_timm_model.py @@ -1,6 +1,8 @@ from unittest.mock import Mock -import torch + import pytest +import torch + from swarms.models.timm import TimmModel diff --git a/tests/models/test_togther.py b/tests/models/test_togther.py index 43a99b00..dd2a2f89 100644 --- a/tests/models/test_togther.py +++ b/tests/models/test_togther.py @@ -1,8 +1,10 @@ -import requests +import logging +from unittest.mock import Mock, patch + import pytest -from unittest.mock import 
patch, Mock +import requests + from swarms.models.together import TogetherLLM -import logging @pytest.fixture diff --git a/tests/models/test_ultralytics.py b/tests/models/test_ultralytics.py index 3e7a7b5c..cca1d023 100644 --- a/tests/models/test_ultralytics.py +++ b/tests/models/test_ultralytics.py @@ -1,4 +1,5 @@ from unittest.mock import patch + from swarms.models.ultralytics_model import UltralyticsModel diff --git a/tests/models/test_vilt.py b/tests/models/test_vilt.py index 99e6848e..d849f98e 100644 --- a/tests/models/test_vilt.py +++ b/tests/models/test_vilt.py @@ -1,6 +1,8 @@ +from unittest.mock import Mock, patch + import pytest -from unittest.mock import patch, Mock -from swarms.models.vilt import Vilt, Image, requests + +from swarms.models.vilt import Image, Vilt, requests # Fixture for Vilt instance diff --git a/tests/models/test_yi_200k.py b/tests/models/test_yi_200k.py index 9f3c236f..b31daa3e 100644 --- a/tests/models/test_yi_200k.py +++ b/tests/models/test_yi_200k.py @@ -1,6 +1,7 @@ import pytest import torch from transformers import AutoTokenizer + from swarms.models.yi_200k import Yi34B200k diff --git a/tests/structs/test_agent.py b/tests/structs/test_agent.py index 8e5b11be..5be7f31a 100644 --- a/tests/structs/test_agent.py +++ b/tests/structs/test_agent.py @@ -184,7 +184,7 @@ def test_save_different_memory(basic_flow, tmp_path): file_path = tmp_path / "memory.json" basic_flow.memory.append(["Task1", "Task2", "Task3"]) basic_flow.save(file_path) - with open(file_path, "r") as f: + with open(file_path) as f: data = json.load(f) assert data == [["Task1", "Task2", "Task3"]] diff --git a/tests/structs/test_autoscaler.py b/tests/structs/test_autoscaler.py index ac3da51a..2e5585bf 100644 --- a/tests/structs/test_autoscaler.py +++ b/tests/structs/test_autoscaler.py @@ -1,9 +1,8 @@ import os - -from dotenv import load_dotenv from unittest.mock import MagicMock, patch import pytest +from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.structs import Agent diff --git a/tests/structs/test_base.py b/tests/structs/test_base.py index 8b54dec0..971f966b 100644 --- a/tests/structs/test_base.py +++ b/tests/structs/test_base.py @@ -1,6 +1,8 @@ -import pytest import os from datetime import datetime + +import pytest + from swarms.structs.base import BaseStructure @@ -52,7 +54,7 @@ class TestBaseStructure: base_structure.log_error(error_message) log_file = os.path.join(tmp_dir, "TestStructure_errors.log") - with open(log_file, "r") as file: + with open(log_file) as file: lines = file.readlines() assert len(lines) == 1 assert lines[0] == f"{error_message}\n" @@ -83,7 +85,7 @@ class TestBaseStructure: base_structure.log_event(event, event_type) log_file = os.path.join(tmp_dir, "TestStructure_events.log") - with open(log_file, "r") as file: + with open(log_file) as file: lines = file.readlines() assert len(lines) == 1 assert ( @@ -122,7 +124,7 @@ class TestBaseStructure: await base_structure.log_error_async(error_message) log_file = os.path.join(tmp_dir, "TestStructure_errors.log") - with open(log_file, "r") as file: + with open(log_file) as file: lines = file.readlines() assert len(lines) == 1 assert lines[0] == f"{error_message}\n" @@ -165,7 +167,7 @@ class TestBaseStructure: await base_structure.log_event_async(event, event_type) log_file = os.path.join(tmp_dir, "TestStructure_events.log") - with open(log_file, "r") as file: + with open(log_file) as file: lines = file.readlines() assert len(lines) == 1 assert ( diff --git a/tests/structs/test_base_workflow.py 
b/tests/structs/test_base_workflow.py
index 17be5ea8..ccb7a563 100644
--- a/tests/structs/test_base_workflow.py
+++ b/tests/structs/test_base_workflow.py
@@ -1,11 +1,12 @@
+import json
 import os
+
 import pytest
-import json
+from dotenv import load_dotenv
+
 from swarms.models import OpenAIChat
 from swarms.structs import BaseWorkflow
-from dotenv import load_dotenv
-
 load_dotenv()

 api_key = os.environ.get("OPENAI_API_KEY")
diff --git a/tests/structs/test_company.py b/tests/structs/test_company.py
index 0b1ec105..6de14da1 100644
--- a/tests/structs/test_company.py
+++ b/tests/structs/test_company.py
@@ -1,7 +1,8 @@
 import pytest
+
+from swarms import OpenAIChat
 from swarms.structs.agent import Agent
 from swarms.structs.company import Company
-from swarms import OpenAIChat

 # Mock OpenAIChat instance
 llm = OpenAIChat(openai_api_key="test_key", max_tokens=4000)
diff --git a/tests/structs/test_concurrent_workflow.py b/tests/structs/test_concurrent_workflow.py
index 206e8e2a..e3fabdd5 100644
--- a/tests/structs/test_concurrent_workflow.py
+++ b/tests/structs/test_concurrent_workflow.py
@@ -1,6 +1,7 @@
-from unittest.mock import Mock, create_autospec, patch
 from concurrent.futures import Future
-from swarms.structs import ConcurrentWorkflow, Task, Agent
+from unittest.mock import Mock, create_autospec, patch
+
+from swarms.structs import Agent, ConcurrentWorkflow, Task


 def test_add():
diff --git a/tests/structs/test_conversation.py b/tests/structs/test_conversation.py
index 84673a42..049f3fb3 100644
--- a/tests/structs/test_conversation.py
+++ b/tests/structs/test_conversation.py
@@ -1,4 +1,5 @@
 import pytest
+
 from swarms.structs.conversation import Conversation
diff --git a/tests/structs/test_json.py b/tests/structs/test_json.py
index 4e086d37..9ba11072 100644
--- a/tests/structs/test_json.py
+++ b/tests/structs/test_json.py
@@ -2,8 +2,10 @@
 # Contents of test_json.py, which must be placed in the `tests/` directory.

-import pytest
 import json
+
+import pytest
+
 from swarms.tokenizers import JSON
diff --git a/tests/structs/test_majority_voting.py b/tests/structs/test_majority_voting.py
index 45474a2d..dcd25f0b 100644
--- a/tests/structs/test_majority_voting.py
+++ b/tests/structs/test_majority_voting.py
@@ -1,4 +1,5 @@
 from unittest.mock import MagicMock
+
 import pytest

 from swarms.structs.agent import Agent
diff --git a/tests/structs/test_model_parallizer.py b/tests/structs/test_model_parallizer.py
index 37ca43db..a0840608 100644
--- a/tests/structs/test_model_parallizer.py
+++ b/tests/structs/test_model_parallizer.py
@@ -1,11 +1,12 @@
 import pytest
-from swarms.structs.model_parallizer import ModelParallelizer
+
 from swarms.models import (
+    GPT4VisionAPI,
     HuggingfaceLLM,
     Mixtral,
-    GPT4VisionAPI,
     ZeroscopeTTV,
 )
+from swarms.structs.model_parallizer import ModelParallelizer

 # Initialize the models
 custom_config = {
diff --git a/tests/structs/test_multi_agent_collab.py b/tests/structs/test_multi_agent_collab.py
index 475b32b3..555771e7 100644
--- a/tests/structs/test_multi_agent_collab.py
+++ b/tests/structs/test_multi_agent_collab.py
@@ -1,12 +1,12 @@
 import json
 import os
-import pytest
 from unittest.mock import Mock
-from swarms.structs import Agent
+
+import pytest
+
 from swarms.models import OpenAIChat
-from swarms.structs.multi_agent_collab import (
-    MultiAgentCollaboration,
-)
+from swarms.structs import Agent
+from swarms.structs.multi_agent_collab import MultiAgentCollaboration

 # Sample agents for testing
 agent1 = Agent(llm=OpenAIChat(), max_loops=2)
@@ -126,7 +126,7 @@ def test_save(collaboration, tmp_path):
     collaboration.saved_file_path_name = tmp_path / "test_save.json"
     collaboration.save()

-    with open(collaboration.saved_file_path_name, "r") as file:
+    with open(collaboration.saved_file_path_name) as file:
         saved_data = json.load(file)

     assert saved_data["_step"] == collaboration._step
diff --git a/tests/structs/test_nonlinear_workflow.py b/tests/structs/test_nonlinear_workflow.py
index 8919fc76..2544a7e4 100644
--- a/tests/structs/test_nonlinear_workflow.py
+++ b/tests/structs/test_nonlinear_workflow.py
@@ -1,6 +1,7 @@
 import pytest
-from swarms.structs import NonlinearWorkflow, Task
+
 from swarms.models import OpenAIChat
+from swarms.structs import NonlinearWorkflow, Task


 class TestNonlinearWorkflow:
diff --git a/tests/structs/test_recursive_workflow.py b/tests/structs/test_recursive_workflow.py
index 171c7cad..5b24f921 100644
--- a/tests/structs/test_recursive_workflow.py
+++ b/tests/structs/test_recursive_workflow.py
@@ -1,5 +1,7 @@
-import pytest
 from unittest.mock import Mock, create_autospec
+
+import pytest
+
 from swarms.models import OpenAIChat
 from swarms.structs import RecursiveWorkflow, Task
diff --git a/tests/structs/test_task.py b/tests/structs/test_task.py
index 8a76549c..de0352af 100644
--- a/tests/structs/test_task.py
+++ b/tests/structs/test_task.py
@@ -1,3 +1,5 @@
+import datetime
+from datetime import timedelta
 from unittest.mock import Mock

 import pytest
@@ -9,8 +11,6 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
 )
 from swarms.structs.agent import Agent
 from swarms.structs.task import Task
-import datetime
-from datetime import timedelta

 load_dotenv()
diff --git a/tests/structs/test_taskqueuebase.py b/tests/structs/test_taskqueuebase.py
index f37e2ca5..512f72ae 100644
--- a/tests/structs/test_taskqueuebase.py
+++ b/tests/structs/test_taskqueuebase.py
@@ -2,8 +2,10 @@
 import threading
 from unittest.mock import Mock
+
 import pytest
-from swarms.tokenizers import TaskQueueBase, Task, Agent
+
+from swarms.tokenizers import Agent, Task, TaskQueueBase


 # Create mocked instances of dependencies
diff --git a/tests/structs/test_tests_graph_workflow.py b/tests/structs/test_tests_graph_workflow.py
index c4bafd35..cb5b17a7 100644
--- a/tests/structs/test_tests_graph_workflow.py
+++ b/tests/structs/test_tests_graph_workflow.py
@@ -1,4 +1,5 @@
 import pytest
+
 from swarms.structs.graph_workflow import GraphWorkflow
diff --git a/tests/test_upload_tests_to_issues.py b/tests/test_upload_tests_to_issues.py
index 15de1245..0857c58a 100644
--- a/tests/test_upload_tests_to_issues.py
+++ b/tests/test_upload_tests_to_issues.py
@@ -1,5 +1,6 @@
 import os
 import subprocess
+
 import requests
 from dotenv import load_dotenv
diff --git a/tests/tokenizers/test_anthropictokenizer.py b/tests/tokenizers/test_anthropictokenizer.py
index 5d49b5eb..14b2fd86 100644
--- a/tests/tokenizers/test_anthropictokenizer.py
+++ b/tests/tokenizers/test_anthropictokenizer.py
@@ -1,6 +1,7 @@
 # AnthropicTokenizer
 import pytest
+
 from swarms.tokenizers.anthropic_tokenizer import AnthropicTokenizer
diff --git a/tests/tokenizers/test_basetokenizer.py b/tests/tokenizers/test_basetokenizer.py
index 9bd5d9c6..3956d2de 100644
--- a/tests/tokenizers/test_basetokenizer.py
+++ b/tests/tokenizers/test_basetokenizer.py
@@ -1,6 +1,7 @@
 # BaseTokenizer
 import pytest
+
 from swarms.tokenizers.base_tokenizer import BaseTokenizer
diff --git a/tests/tokenizers/test_coheretokenizer.py b/tests/tokenizers/test_coheretokenizer.py
index 65633d9a..2607cf9a 100644
--- a/tests/tokenizers/test_coheretokenizer.py
+++ b/tests/tokenizers/test_coheretokenizer.py
@@ -1,8 +1,10 @@
 # CohereTokenizer
+from unittest.mock import MagicMock
+
 import pytest
+
 from swarms.tokenizers.cohere_tokenizer import CohereTokenizer
-from unittest.mock import MagicMock


 @pytest.fixture
diff --git a/tests/tokenizers/test_huggingfacetokenizer.py b/tests/tokenizers/test_huggingfacetokenizer.py
index 3a0d29af..1eedb6e5 100644
--- a/tests/tokenizers/test_huggingfacetokenizer.py
+++ b/tests/tokenizers/test_huggingfacetokenizer.py
@@ -1,8 +1,10 @@
 # HuggingFaceTokenizer
-import pytest
 import os
 from unittest.mock import patch
+
+import pytest
+
 from swarms.tokenizers.r_tokenizers import HuggingFaceTokenizer
diff --git a/tests/tokenizers/test_openaitokenizer.py b/tests/tokenizers/test_openaitokenizer.py
index 229db92d..3c24748d 100644
--- a/tests/tokenizers/test_openaitokenizer.py
+++ b/tests/tokenizers/test_openaitokenizer.py
@@ -1,6 +1,7 @@
 # OpenAITokenizer
 import pytest
+
 import swarms.tokenizers.openai_tokenizers as tokenizers
diff --git a/tests/tokenizers/test_tokenizer.py b/tests/tokenizers/test_tokenizer.py
index ea40a2e0..b868f0a1 100644
--- a/tests/tokenizers/test_tokenizer.py
+++ b/tests/tokenizers/test_tokenizer.py
@@ -1,8 +1,9 @@
 # Tokenizer
-from swarms.tokenizers.r_tokenizers import Tokenizer
 from unittest.mock import patch

+from swarms.tokenizers.r_tokenizers import Tokenizer
+

 def test_initializer_existing_model_file():
     with patch("os.path.exists", return_value=True):
diff --git a/tests/utils/test_check_device.py b/tests/utils/test_check_device.py
index d542803a..503a3774 100644
--- a/tests/utils/test_check_device.py
+++ b/tests/utils/test_check_device.py
@@ -1,5 +1,7 @@
-import torch
 import logging
+
+import torch
+
 from swarms.utils import check_device

 # For the purpose of the test, we're assuming that the `memory_allocated`
diff --git a/tests/utils/test_class_args_wrapper.py b/tests/utils/test_class_args_wrapper.py
index a222ffe9..99d38b2c 100644
--- a/tests/utils/test_class_args_wrapper.py
+++ b/tests/utils/test_class_args_wrapper.py
@@ -1,11 +1,13 @@
-import pytest
-from io import StringIO
 from contextlib import redirect_stdout
-from swarms.utils.class_args_wrapper import print_class_parameters
-from swarms.structs.agent import Agent
+from io import StringIO
+
+import pytest
 from fastapi import FastAPI
 from fastapi.testclient import TestClient

+from swarms.structs.agent import Agent
+from swarms.utils.class_args_wrapper import print_class_parameters
+
 app = FastAPI()
diff --git a/tests/utils/test_device.py b/tests/utils/test_device.py
index 14399de9..9be83be4 100644
--- a/tests/utils/test_device.py
+++ b/tests/utils/test_device.py
@@ -1,6 +1,8 @@
-import torch
 from unittest.mock import MagicMock
+
 import pytest
+import torch
+
 from swarms.utils.device_checker_cuda import check_device
diff --git a/tests/utils/test_display_markdown_message.py b/tests/utils/test_display_markdown_message.py
index 048038b2..1b7cadaa 100644
--- a/tests/utils/test_display_markdown_message.py
+++ b/tests/utils/test_display_markdown_message.py
@@ -1,10 +1,12 @@
 # import necessary modules
+from unittest import mock
+
 import pytest
-from swarms.utils import display_markdown_message
 from rich.console import Console
 from rich.markdown import Markdown
 from rich.rule import Rule
-from unittest import mock
+
+from swarms.utils import display_markdown_message


 def test_basic_message():
diff --git a/tests/utils/test_extract_code_from_markdown.py b/tests/utils/test_extract_code_from_markdown.py
index 9d37fc94..eb1a3e5d 100644
--- a/tests/utils/test_extract_code_from_markdown.py
+++ b/tests/utils/test_extract_code_from_markdown.py
@@ -1,4 +1,5 @@
 import pytest
+
 from swarms.utils import extract_code_from_markdown
diff --git a/tests/utils/test_find_image_path.py b/tests/utils/test_find_image_path.py
index 9fbc09ee..29b1c627 100644
--- a/tests/utils/test_find_image_path.py
+++ b/tests/utils/test_find_image_path.py
@@ -1,8 +1,10 @@
 # Filename: test_utils.py

+import os
+
 import pytest
+
 from swarms.utils import find_image_path
-import os


 def test_find_image_path_no_images():
diff --git a/tests/utils/test_limit_tokens_from_string.py b/tests/utils/test_limit_tokens_from_string.py
index 5b5f8efd..4d68dccb 100644
--- a/tests/utils/test_limit_tokens_from_string.py
+++ b/tests/utils/test_limit_tokens_from_string.py
@@ -1,4 +1,5 @@
 import pytest
+
 from swarms.utils import limit_tokens_from_string
diff --git a/tests/utils/test_load_model_torch.py b/tests/utils/test_load_model_torch.py
index ef2c17d4..c2018c6a 100644
--- a/tests/utils/test_load_model_torch.py
+++ b/tests/utils/test_load_model_torch.py
@@ -1,6 +1,7 @@
 import pytest
 import torch
 from torch import nn
+
 from swarms.utils import load_model_torch
diff --git a/tests/utils/test_load_models_torch.py b/tests/utils/test_load_models_torch.py
index 707f1ce4..3f09f411 100644
--- a/tests/utils/test_load_models_torch.py
+++ b/tests/utils/test_load_models_torch.py
@@ -1,6 +1,8 @@
+from unittest.mock import MagicMock
+
 import pytest
 import torch
-from unittest.mock import MagicMock
+
 from swarms.utils.load_model_torch import load_model_torch
diff --git a/tests/utils/test_metrics_decorator.py b/tests/utils/test_metrics_decorator.py
index 7a676657..719d50a7 100644
--- a/tests/utils/test_metrics_decorator.py
+++ b/tests/utils/test_metrics_decorator.py
@@ -1,10 +1,11 @@
 # pytest imports
-import pytest
+import time
 from unittest.mock import Mock

+import pytest
+
 # Imports from your project
 from swarms.utils import metrics_decorator
-import time


 # Basic successful test
diff --git a/tests/utils/test_pdf_to_text.py b/tests/utils/test_pdf_to_text.py
index 704f7449..f271af60 100644
--- a/tests/utils/test_pdf_to_text.py
+++ b/tests/utils/test_pdf_to_text.py
@@ -1,5 +1,5 @@
-import pytest
 import pypdf
+import pytest

 from swarms.utils import pdf_to_text
diff --git a/tests/utils/test_prep_torch_inference.py b/tests/utils/test_prep_torch_inference.py
index 8ee33fbc..6af4a9a7 100644
--- a/tests/utils/test_prep_torch_inference.py
+++ b/tests/utils/test_prep_torch_inference.py
@@ -1,7 +1,9 @@
 import unittest
+from unittest.mock import Mock
+
 import pytest
 import torch
-from unittest.mock import Mock
+
 from swarms.utils import prep_torch_inference
diff --git a/tests/utils/test_prep_torch_model_inference.py b/tests/utils/test_prep_torch_model_inference.py
index 4a13bee1..07da4e97 100644
--- a/tests/utils/test_prep_torch_model_inference.py
+++ b/tests/utils/test_prep_torch_model_inference.py
@@ -1,5 +1,7 @@
-import torch
 from unittest.mock import MagicMock
+
+import torch
+
 from swarms.utils.prep_torch_model_inference import (
     prep_torch_inference,
 )
diff --git a/tests/utils/test_print_class_parameters.py b/tests/utils/test_print_class_parameters.py
index ae824170..9a133ae4 100644
--- a/tests/utils/test_print_class_parameters.py
+++ b/tests/utils/test_print_class_parameters.py
@@ -1,4 +1,5 @@
 import pytest
+
 from swarms.utils import print_class_parameters
diff --git a/tests/utils/test_subprocess_code_interpreter.py b/tests/utils/test_subprocess_code_interpreter.py
index 3ce54530..3bb800f5 100644
--- a/tests/utils/test_subprocess_code_interpreter.py
+++ b/tests/utils/test_subprocess_code_interpreter.py
@@ -1,10 +1,12 @@
-import pytest
+import queue
 import subprocess
 import threading
-import queue
-from swarms.utils.code_interpreter import (
+
+import pytest
+
+from swarms.utils.code_interpreter import (  # Adjust the import according to your project structure
     SubprocessCodeInterpreter,
-)  # Adjust the import according to your project structure
+)


 # Fixture for the SubprocessCodeInterpreter instance