diff --git a/playground/agents/multion_agent.py b/multion_agent.py similarity index 63% rename from playground/agents/multion_agent.py rename to multion_agent.py index a6f2ce24..6830e973 100644 --- a/playground/agents/multion_agent.py +++ b/multion_agent.py @@ -1,10 +1,9 @@ from swarms.agents.multion_agent import MultiOnAgent import timeit from swarms import Agent, ConcurrentWorkflow, Task -from swarms.utils.loguru_logger import logger # model -model = MultiOnAgent(multion_api_key="") +model = MultiOnAgent(multion_api_key="") # out = model.run("search for a recipe") agent = Agent( @@ -15,27 +14,26 @@ agent = Agent( system_prompt=None, ) -logger.info("[Agent][ID][MultiOnAgent][Initialized][Successfully") +# logger.info("[Agent][ID][MultiOnAgent][Initialized][Successfully") # Task task = Task( agent=agent, description=( - "send an email to vyom on superhuman for a partnership with" - " multion" + "Download https://www.coachcamel.com/" ), ) # Swarm -logger.info( - f"Running concurrent workflow with task: {task.description}" -) +# logger.info( +# f"Running concurrent workflow with task: {task.description}" +# ) # Measure execution time start_time = timeit.default_timer() workflow = ConcurrentWorkflow( - max_workers=1, + max_workers=20, autosave=True, print_results=True, return_results=True, @@ -47,4 +45,5 @@ workflow.run() # Calculate execution time execution_time = timeit.default_timer() - start_time -logger.info(f"Execution time: {execution_time} seconds") +# logger.info(f"Execution time: {execution_time} seconds") +print(f"Execution time: {execution_time} seconds") \ No newline at end of file diff --git a/playground/diy/hierchical_example.py b/playground/diy/hierchical_example.py deleted file mode 100644 index 73b58f45..00000000 --- a/playground/diy/hierchical_example.py +++ /dev/null @@ -1,28 +0,0 @@ -from swarms import HierarchicalSwarm - -swarm = HierarchicalSwarm( - openai_api_key="key", - model_type="openai", - model_id="gpt-4", 
- use_vectorstore=False, - use_async=False, - human_in_the_loop=False, - logging_enabled=False, -) - -# run the swarm with an objective -result = swarm.run("Design a new car") - -# or huggingface -swarm = HierarchicalSwarm( - model_type="huggingface", - model_id="tiaueu/falcon", - use_vectorstore=True, - embedding_size=768, - use_async=False, - human_in_the_loop=True, - logging_enabled=False, -) - -# Run the swarm with a particular objective -result = swarm.run("Write a sci-fi short story") diff --git a/playground/memory/chroma_usage_example.py b/playground/memory/chroma_usage_example.py index c17efa3a..d00822b0 100644 --- a/playground/memory/chroma_usage_example.py +++ b/playground/memory/chroma_usage_example.py @@ -1,11 +1,17 @@ -from swarms.memory import chroma +from swarms.memory import ChromaDB -chromadbcl = chroma.ChromaClient() -chromadbcl.add_vectors( - ["This is a document", "BONSAIIIIIII", "the walking dead"] +# Initialize the memory +chroma = ChromaDB( + metric="cosine", + limit_tokens=1000, + verbose=True, ) -results = chromadbcl.search_vectors("zombie", limit=1) +# Add text +text = "This is a test" +chroma.add(text) + +# Search for similar text +similar_text = chroma.query(text) -print(results) diff --git a/playground/structs/flow_example.py b/playground/structs/agent_basic_customize.py similarity index 100% rename from playground/structs/flow_example.py rename to playground/structs/agent_basic_customize.py diff --git a/playground/structs/agent_with_longterm.py b/playground/structs/agent_with_longterm_memory.py similarity index 97% rename from playground/structs/agent_with_longterm.py rename to playground/structs/agent_with_longterm_memory.py index 588d6546..2dcc30ac 100644 --- a/playground/structs/agent_with_longterm.py +++ b/playground/structs/agent_with_longterm_memory.py @@ -14,7 +14,7 @@ api_key = os.environ.get("OPENAI_API_KEY") # Initilaize the chromadb client chromadb = ChromaDB( - metric="cosine", + metric="cosine", output="results", ) diff 
--git a/playground/structs/chat_example.py b/playground/structs/chat_example.py deleted file mode 100644 index 08783068..00000000 --- a/playground/structs/chat_example.py +++ /dev/null @@ -1,11 +0,0 @@ -from swarms import Orchestrator, Worker - -# Instantiate the Orchestrator with 10 agents -orchestrator = Orchestrator( - Worker, agent_list=[Worker] * 10, task_queue=[] -) - -# Agent 1 sends a message to Agent 2 -orchestrator.chat( - sender_id=1, receiver_id=2, message="Hello, Agent 2!" -) diff --git a/playground/structs/company_example.py b/playground/structs/company_example.py index 72396c61..1f008e77 100644 --- a/playground/structs/company_example.py +++ b/playground/structs/company_example.py @@ -1,4 +1,3 @@ -# Example import os diff --git a/playground/structs/dialogue_simulator_example.py b/playground/structs/dialogue_simulator_example.py index 14c35b7e..b83e13ef 100644 --- a/playground/structs/dialogue_simulator_example.py +++ b/playground/structs/dialogue_simulator_example.py @@ -1,6 +1,5 @@ from swarms.models import OpenAIChat -from swarms.swarms import DialogueSimulator -from swarms.workers.worker import Worker +from swarms import DialogueSimulator, Worker llm = OpenAIChat( model_name="gpt-4", openai_api_key="api-key", temperature=0.5 diff --git a/playground/structs/easy_example.py b/playground/structs/easy_example.py index 2a537c10..bebdb11a 100644 --- a/playground/structs/easy_example.py +++ b/playground/structs/easy_example.py @@ -1,7 +1,14 @@ -from swarms import swarm +from swarms import Agent, OpenAIChat -# Use the function -api_key = "APIKEY" -objective = "What is the capital of the UK?" -result = swarm(api_key, objective) -print(result) # Prints: "The capital of the UK is London." 
+## Initialize the workflow +agent = Agent( + llm=OpenAIChat(), + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, +) + +# Run the workflow on a task +agent("Find a chick fil a equivalent in hayes valley") diff --git a/playground/structs/godmode_example.py b/playground/structs/godmode_example.py index 46f71393..5d3cef83 100644 --- a/playground/structs/godmode_example.py +++ b/playground/structs/godmode_example.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat -from swarms.swarms import ModelParallelizer +from swarms import ModelParallelizer load_dotenv() diff --git a/playground/structs/orchestrator_example.py b/playground/structs/orchestrator_example.py deleted file mode 100644 index 6b91b74f..00000000 --- a/playground/structs/orchestrator_example.py +++ /dev/null @@ -1,19 +0,0 @@ -from swarms import Orchestrator, Worker - -node = Worker( - openai_api_key="", - ai_name="Optimus Prime", -) - - -# Instantiate the Orchestrator with 10 agents -orchestrator = Orchestrator( - node, agent_list=[node] * 10, task_queue=[] -) - -# Agent 7 sends a message to Agent 9 -orchestrator.chat( - sender_id=7, - receiver_id=9, - message="Can you help me with this task?", -) diff --git a/playground/structs/social_app_example.py b/playground/structs/social_app_example.py deleted file mode 100644 index 8bf90bf5..00000000 --- a/playground/structs/social_app_example.py +++ /dev/null @@ -1,19 +0,0 @@ -from ..swarms import HierarchicalSwarm - -# Retrieve your API key from the environment or replace with your actual key -api_key = "sksdsds" - -# Initialize HierarchicalSwarm with your API key -swarm = HierarchicalSwarm(openai_api_key=api_key) - -# Define an objective -objective = """ -Please develop and serve a simple community web service. -People can signup, login, post, comment. -Post and comment should be visible at once. -I want it to have neumorphism-style. 
-The ports you can use are 4500 and 6500. -""" - -# Run HierarchicalSwarm -swarm.run(objective) diff --git a/playground/structs/stackoverflow_swarm_example.py b/playground/structs/stackoverflow_swarm_example.py deleted file mode 100644 index e69de29b..00000000 diff --git a/playground/structs/swarms_example.py b/playground/structs/swarms_example.py deleted file mode 100644 index 9f015807..00000000 --- a/playground/structs/swarms_example.py +++ /dev/null @@ -1,16 +0,0 @@ -from swarms import HierarchicalSwarm - -# Retrieve your API key from the environment or replace with your actual key -api_key = "" - -# Initialize HierarchicalSwarm with your API key -swarm = HierarchicalSwarm(api_key) - -# Define an objective -objective = ( - "Find 20 potential customers for a HierarchicalSwarm based AI" - " Agent automation infrastructure" -) - -# Run HierarchicalSwarm -swarm.run(objective) diff --git a/playground/structs/todo_app_example.py b/playground/structs/todo_app_example.py deleted file mode 100644 index 981bf499..00000000 --- a/playground/structs/todo_app_example.py +++ /dev/null @@ -1,19 +0,0 @@ -from swarms import HierarchicalSwarm - -# Retrieve your API key from the environment or replace with your actual key -api_key = "sksdsds" - -# Initialize HierarchicalSwarm with your API key -swarm = HierarchicalSwarm(openai_api_key=api_key) - -# Define an objective -objective = """ -Please develop and serve a simple web TODO app. -The user can list all TODO items and add or delete each TODO item. -I want it to have neumorphism-style. -The ports you can use are 4500 and 6500. 
- -""" - -# Run HierarchicalSwarm -swarm.run(objective) diff --git a/playground/structs/tool_utils_example.py b/playground/structs/tool_utils_example.py deleted file mode 100644 index ff7e17c2..00000000 --- a/playground/structs/tool_utils_example.py +++ /dev/null @@ -1,19 +0,0 @@ -from swarms.tools.tool import tool -from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs - - -@tool -def search_api(query: str) -> str: - """Search API - - Args: - query (str): _description_ - - Returns: - str: _description_ - """ - print(f"Searching API for {query}") - - -tool_docs = scrape_tool_func_docs(search_api) -print(tool_docs) diff --git a/playground/structs/workflow_example.py b/playground/structs/workflow_example.py deleted file mode 100644 index 0d9f18c4..00000000 --- a/playground/structs/workflow_example.py +++ /dev/null @@ -1,7 +0,0 @@ -from swarms.models import OpenAIChat -from swarms.structs.workflow import Workflow - -llm = OpenAIChat() - - -workflow = Workflow(llm) diff --git a/playground/tools/agent_with_tools_example.py b/playground/tools/agent_with_tools_example.py index 0d736a16..8ebcde69 100644 --- a/playground/tools/agent_with_tools_example.py +++ b/playground/tools/agent_with_tools_example.py @@ -2,8 +2,8 @@ import os from dotenv import load_dotenv -from swarms.models import OpenAIChat -from swarms.structs import Agent +from swarms import OpenAIChat, Agent +from swarms.tools.tool import tool load_dotenv() @@ -12,24 +12,24 @@ api_key = os.environ.get("OPENAI_API_KEY") llm = OpenAIChat(api_key=api_key) -# @tool -# def search_api(query: str) -> str: -# """Search API +@tool +def search_api(query: str) -> str: + """Search API -# Args: -# query (str): _description_ + Args: + query (str): _description_ -# Returns: -# str: _description_ -# """ -# print(f"Searching API for {query}") + Returns: + str: _description_ + """ + print(f"Searching API for {query}") ## Initialize the workflow agent = Agent( llm=llm, max_loops=5, - # tools=[search_api], + 
tools=[search_api], dashboard=True, ) diff --git a/playground/tools/tool_prompt_scaper_example.py b/playground/tools/tool_prompt_scaper_example.py deleted file mode 100644 index 2c0434d6..00000000 --- a/playground/tools/tool_prompt_scaper_example.py +++ /dev/null @@ -1,22 +0,0 @@ -from swarms.tools.tool import tool -from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs - -# Define a tool by decorating a function with the tool decorator and providing a docstring - - -@tool(return_direct=True) -def search_api(query: str): - """Search the web for the query - - Args: - query (str): _description_ - - Returns: - _type_: _description_ - """ - return f"Search results for {query}" - - -# Scrape the tool func docs to prepare for injection into the agent prompt -out = scrape_tool_func_docs(search_api) -print(out) diff --git a/playground/workflow_example_example.py b/playground/workflow_example_example.py deleted file mode 100644 index 78909dc7..00000000 --- a/playground/workflow_example_example.py +++ /dev/null @@ -1,10 +0,0 @@ -from swarms import Workflow -from swarms.models import ChatOpenAI - -workflow = Workflow(ChatOpenAI) - -workflow.add("What's the weather in miami") -workflow.add("Provide details for {{ parent_output }}") -workflow.add("Summarize the above information: {{ parent_output}}") - -workflow.run() diff --git a/swarms/agents/multion_agent.py b/swarms/agents/multion_agent.py index 760f3251..ecc36cb8 100644 --- a/swarms/agents/multion_agent.py +++ b/swarms/agents/multion_agent.py @@ -37,12 +37,7 @@ class MultiOnAgent(AbstractLLM): self.max_steps = max_steps self.starting_url = starting_url - self.multion = multion.login( - use_api=True, - multion_api_key=str(multion_api_key), - *args, - **kwargs, - ) + def run(self, task: str, *args, **kwargs): """ @@ -56,7 +51,14 @@ class MultiOnAgent(AbstractLLM): Returns: dict: The response from the browsing task. 
""" - response = self.multion.browse( + multion.login( + use_api=True, + multion_api_key=str(self.multion_api_key), + *args, + **kwargs, + ) + + response = multion.browse( { "cmd": task, "url": self.starting_url,