From ce9d17b508c4d0d85f05f2cf7b29c4e432728141 Mon Sep 17 00:00:00 2001
From: Kye
Date: Mon, 3 Jul 2023 20:40:39 -0400
Subject: [PATCH] worker agent

---
 setup.py                            |  2 +-
 swarms/agents/workers/auto_agent.py | 76 ++++++++++++++++-------------
 2 files changed, 44 insertions(+), 34 deletions(-)

diff --git a/setup.py b/setup.py
index 2d0cfdfa..597ea421 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
 setup(
   name = 'swarms',
   packages = find_packages(exclude=[]),
-  version = '0.1.7',
+  version = '0.1.8',
   license='MIT',
   description = 'Swarms - Pytorch',
   author = 'Kye Gomez',

diff --git a/swarms/agents/workers/auto_agent.py b/swarms/agents/workers/auto_agent.py
index 2ee77ebb..bb43db03 100644
--- a/swarms/agents/workers/auto_agent.py
+++ b/swarms/agents/workers/auto_agent.py
@@ -53,55 +53,65 @@ class MultiModalVisualAgentTool(BaseTool):
 
-embeddings_model = OpenAIEmbeddings(openai_api_key="")
-embedding_size = 1536
-index = faiss.IndexFlatL2(embedding_size)
-vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
+class WorkerAgent:
+    def __init__(self, objective: str, api_key: str):
+        self.objective = objective
+        self.api_key = api_key
+        self.worker = self.create_agent_worker()
 
+    def create_agent_worker(self):
+        os.environ['OPENAI_API_KEY'] = self.api_key
 
+        llm = ChatOpenAI(model_name="gpt-4", temperature=1.0)
+        embeddings_model = OpenAIEmbeddings()
+        embedding_size = 1536
+        index = faiss.IndexFlatL2(embedding_size)
+        vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
 
-query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
+        query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
+        web_search = DuckDuckGoSearchRun()
 
-# !pip install duckduckgo_search
-web_search = DuckDuckGoSearchRun()
+        multimodal_agent = MultiModalVisualAgent()
+        multimodal_agent_tool = MultiModalVisualAgentTool(multimodal_agent)
 
+        tools = [
+            web_search,
+            WriteFileTool(root_dir="./data"),
+            ReadFileTool(root_dir="./data"),
-#MM CHILD AGENT
-multimodal_agent = MultiModalVisualAgent()
+            multimodal_agent_tool,
+            process_csv,
+            query_website_tool,
+            Terminal,
-
-#
-multimodal_agent_tool = MultiModalVisualAgentTool(MultiModalVisualAgent)
-tools = [
-
-    web_search,
-    WriteFileTool(root_dir="./data"),
-    ReadFileTool(root_dir="./data"),
-    process_csv,
+            CodeWriter,
+            CodeEditor
+        ]
+
+        agent_worker = AutoGPT.from_llm_and_tools(
+            ai_name="WorkerX",
+            ai_role="Assistant",
+            tools=tools,
+            llm=llm,
+            memory=vectorstore.as_retriever(search_kwargs={"k": 8}),
+            human_in_the_loop=True,
+        )
+
+        agent_worker.chain.verbose = True
+
+        return agent_worker
 
-    # multimodal_agent_tool,
+    # objective = "Your objective here"
+    # api_key = "Your OpenAI API key here"
+    # worker_agent = WorkerAgent(objective, api_key)
 
-    query_website_tool,
-    Terminal,
-    CodeWriter,
-    CodeEditor
-    # HumanInputRun(), # Activate if you want the permit asking for help from the human
-]
 
-agent_worker = AutoGPT.from_llm_and_tools(
-    ai_name="WorkerX",
-    ai_role="Assistant",
-    tools=tools,
-    llm=llm,
-    memory=vectorstore.as_retriever(search_kwargs={"k": 8}),
-    human_in_the_loop=True, # Set to True if you want to add feedback at each step.
-)
-agent_worker.chain.verbose = True
 
 # worker_agent = agent_worker
 
 # tree_of_thoughts_prompt = """
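
Usage sketch (not part of the patch): the commented-out lines near the end of create_agent_worker() suggest the intended entry point for the new class. A minimal example under those assumptions; the objective string and API key are placeholders, and run() here is LangChain's AutoGPT goal loop, which human_in_the_loop=True pauses for feedback at each step.

# Minimal usage sketch, assuming the module layout this patch creates.
# The objective and api_key values below are placeholders, not real ones.
from swarms.agents.workers.auto_agent import WorkerAgent

worker_agent = WorkerAgent(
    objective="Write a weather report for SF today",  # placeholder goal
    api_key="sk-...",                                 # your OpenAI API key
)

# AutoGPT agents run against a list of goal strings; with
# human_in_the_loop=True the agent asks for feedback before each step.
worker_agent.worker.run([worker_agent.objective])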