From 39ffd6acbee791b02a032389497896847bf3069a Mon Sep 17 00:00:00 2001 From: Kye Date: Mon, 3 Jul 2023 18:41:59 -0400 Subject: [PATCH] clean up --- setup.py | 2 +- swarms/agents/swarms.py | 176 ++++++++++++++-------------- swarms/agents/workers/auto_agent.py | 5 +- 3 files changed, 94 insertions(+), 89 deletions(-) diff --git a/setup.py b/setup.py index 8b4c195b..b6c9241b 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ from setuptools import setup, find_packages setup( name = 'swarms', packages = find_packages(exclude=[]), - version = '0.1.0', + version = '0.1.1', license='MIT', description = 'Swarms - Pytorch', author = 'Kye Gomez', diff --git a/swarms/agents/swarms.py b/swarms/agents/swarms.py index 7e0ceb73..9cec9253 100644 --- a/swarms/agents/swarms.py +++ b/swarms/agents/swarms.py @@ -21,18 +21,10 @@ from langchain.agents import ZeroShotAgent, Tool, AgentExecutor from langchain import OpenAI, SerpAPIWrapper, LLMChain -from swarms.agents.workers.auto_agent import worker_agent -worker_agent = worker_agent - # Define your embedding model -embeddings_model = OpenAIEmbeddings() +# embeddings_model = OpenAIEmbeddings() # Initialize the vectorstore as empty import faiss - -embedding_size = 1536 -index = faiss.IndexFlatL2(embedding_size) -vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}) - # from swarms.agents.workers.auto_agent import AutoGPT @@ -123,11 +115,23 @@ embeddings_model = OpenAIEmbeddings(openai_api_key="") embedding_size = 1536 index = faiss.IndexFlatL2(embedding_size) vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}) +####################################################################### => Worker Node + + +worker_agent = AutoGPT.from_llm_and_tools( + ai_name="WorkerX", + ai_role="Assistant", + tools=tools, + llm=llm, + memory=vectorstore.as_retriever(search_kwargs={"k": 8}), + human_in_the_loop=True, # Set to True if you want to add feedback at each step. +) + +worker_agent.chain.verbose = True -####################################################################### => Worker Node class WorkerNode: def __init__(self, llm, tools, vectorstore): @@ -170,101 +174,101 @@ worker_node.run_agent("Find 20 potential customers for a Swarms based AI Agent a #======================================> WorkerNode -class MetaWorkerNode: - def __init__(self, llm, tools, vectorstore): - self.llm = llm - self.tools = tools - self.vectorstore = vectorstore +# class MetaWorkerNode: +# def __init__(self, llm, tools, vectorstore): +# self.llm = llm +# self.tools = tools +# self.vectorstore = vectorstore - self.agent = None - self.meta_chain = None +# self.agent = None +# self.meta_chain = None - def init_chain(self, instructions): - self.agent = WorkerNode(self.llm, self.tools, self.vectorstore) - self.agent.create_agent("Assistant", "Assistant Role", False, {}) +# def init_chain(self, instructions): +# self.agent = WorkerNode(self.llm, self.tools, self.vectorstore) +# self.agent.create_agent("Assistant", "Assistant Role", False, {}) - def initialize_meta_chain(): - meta_template = """ - Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future. +# def initialize_meta_chain(): +# meta_template = """ +# Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely. 
Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future. - #### +# #### - {chat_history} +# {chat_history} - #### +# #### - Please reflect on these interactions. +# Please reflect on these interactions. - You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with "Critique: ...". +# You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with "Critique: ...". - You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by "Instructions: ...". - """ +# You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by "Instructions: ...". +# """ - meta_prompt = PromptTemplate( - input_variables=["chat_history"], template=meta_template - ) +# meta_prompt = PromptTemplate( +# input_variables=["chat_history"], template=meta_template +# ) - meta_chain = LLMChain( - llm=OpenAI(temperature=0), - prompt=meta_prompt, - verbose=True, - ) - return meta_chain +# meta_chain = LLMChain( +# llm=OpenAI(temperature=0), +# prompt=meta_prompt, +# verbose=True, +# ) +# return meta_chain - def meta_chain(self): - #define meta template and meta prompting as per your needs - self.meta_chain = initialize_meta_chain() +# def meta_chain(self): +# #define meta template and meta prompting as per your needs +# self.meta_chain = initialize_meta_chain() - def get_chat_history(chain_memory): - memory_key = chain_memory.memory_key - chat_history = chain_memory.load_memory_variables(memory_key)[memory_key] - return chat_history +# def get_chat_history(chain_memory): +# memory_key = chain_memory.memory_key +# chat_history = chain_memory.load_memory_variables(memory_key)[memory_key] +# return chat_history - def get_new_instructions(meta_output): - delimiter = "Instructions: " - new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :] - return new_instructions +# def get_new_instructions(meta_output): +# delimiter = "Instructions: " +# new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :] +# return new_instructions - def main(self, task, max_iters=3, max_meta_iters=5): - failed_phrase = "task failed" - success_phrase = "task succeeded" - key_phrases = [success_phrase, failed_phrase] +# def main(self, task, max_iters=3, max_meta_iters=5): +# failed_phrase = "task failed" +# success_phrase = "task succeeded" +# key_phrases = [success_phrase, failed_phrase] - instructions = "None" - for i in range(max_meta_iters): - print(f"[Episode {i+1}/{max_meta_iters}]") - self.initialize_chain(instructions) - output = 
self.agent.perform('Assistant', {'request': task}) - for j in range(max_iters): - print(f"(Step {j+1}/{max_iters})") - print(f"Assistant: {output}") - print(f"Human: ") - human_input = input() - if any(phrase in human_input.lower() for phrase in key_phrases): - break - output = self.agent.perform('Assistant', {'request': human_input}) - if success_phrase in human_input.lower(): - print(f"You succeeded! Thanks for playing!") - return - self.initialize_meta_chain() - meta_output = self.meta_chain.predict(chat_history=self.get_chat_history()) - print(f"Feedback: {meta_output}") - instructions = self.get_new_instructions(meta_output) - print(f"New Instructions: {instructions}") - print("\n" + "#" * 80 + "\n") - print(f"You failed! Thanks for playing!") +# instructions = "None" +# for i in range(max_meta_iters): +# print(f"[Episode {i+1}/{max_meta_iters}]") +# self.initialize_chain(instructions) +# output = self.agent.perform('Assistant', {'request': task}) +# for j in range(max_iters): +# print(f"(Step {j+1}/{max_iters})") +# print(f"Assistant: {output}") +# print(f"Human: ") +# human_input = input() +# if any(phrase in human_input.lower() for phrase in key_phrases): +# break +# output = self.agent.perform('Assistant', {'request': human_input}) +# if success_phrase in human_input.lower(): +# print(f"You succeeded! Thanks for playing!") +# return +# self.initialize_meta_chain() +# meta_output = self.meta_chain.predict(chat_history=self.get_chat_history()) +# print(f"Feedback: {meta_output}") +# instructions = self.get_new_instructions(meta_output) +# print(f"New Instructions: {instructions}") +# print("\n" + "#" * 80 + "\n") +# print(f"You failed! Thanks for playing!") -#init instance of MetaWorkerNode -meta_worker_node = MetaWorkerNode(llm=OpenAI, tools=tools, vectorstore=vectorstore) +# #init instance of MetaWorkerNode +# meta_worker_node = MetaWorkerNode(llm=OpenAI, tools=tools, vectorstore=vectorstore) -#specify a task and interact with the agent -task = "Provide a sysmatic argument for why we should always eat past with olives" -meta_worker_node.main(task) +# #specify a task and interact with the agent +# task = "Provide a sysmatic argument for why we should always eat past with olives" +# meta_worker_node.main(task) ####################################################################### => Boss Node @@ -356,11 +360,11 @@ agent_executor = AgentExecutor.from_agent_and_tools( boss_node = BossNode(llm=llm, vectorstore=vectorstore, task_execution_chain=agent_executor, verbose=True, max_iterations=5) -#create a task -task = boss_node.create_task(objective="Write a research paper on the impact of climate change on global agriculture") +# #create a task +# task = boss_node.create_task(objective="Write a research paper on the impact of climate change on global agriculture") -#execute the task -boss_node.execute_task(task) +# #execute the task +# boss_node.execute_task(task) class Swarms: diff --git a/swarms/agents/workers/auto_agent.py b/swarms/agents/workers/auto_agent.py index a62082e1..2ee77ebb 100644 --- a/swarms/agents/workers/auto_agent.py +++ b/swarms/agents/workers/auto_agent.py @@ -80,6 +80,8 @@ tools = [ ReadFileTool(root_dir="./data"), process_csv, + # multimodal_agent_tool, + query_website_tool, Terminal, @@ -101,8 +103,7 @@ agent_worker = AutoGPT.from_llm_and_tools( agent_worker.chain.verbose = True -worker_agent = agent_worker - +# worker_agent = agent_worker # tree_of_thoughts_prompt = """ # Imagine three different experts are answering this question. 
All experts will write down each chain of thought of each step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. The question is...
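
Usage sketch (not part of the patch): after these hunks, swarms.py exposes module-level `llm`, `tools`, `vectorstore`, and `agent_executor` objects alongside the `WorkerNode` and `BossNode` classes. The snippet below is a rough, untested illustration of how those pieces are wired together; it assumes the `swarms.agents.swarms` package path is importable and that valid OpenAI/SerpAPI keys are configured, and it reuses only calls that appear verbatim in the diff rather than a documented API.

# Hedged usage sketch -- mirrors calls visible in the hunks above, not a
# stable API. Note that importing the module also executes its module-level
# example calls (e.g. worker_node.run_agent(...)), so expect API traffic.
from swarms.agents.swarms import (
    WorkerNode,      # AutoGPT-backed worker class defined in this file
    BossNode,        # planner class that drives a task_execution_chain
    llm,             # module-level LLM referenced by both nodes
    tools,           # shared tool list
    vectorstore,     # FAISS + InMemoryDocstore memory built at module level
    agent_executor,  # AgentExecutor used as the boss's task_execution_chain
)

# Worker side: constructor and create_agent arguments are taken from the
# diff (ai_name, ai_role, human_in_the_loop, search_kwargs, positionally).
worker_node = WorkerNode(llm=llm, tools=tools, vectorstore=vectorstore)
worker_node.create_agent("Assistant", "Assistant Role", False, {})
worker_node.run_agent("Find 20 potential customers for a Swarms based AI Agent automation infrastructure")

# Boss side: mirrors the example that this commit comments out at the
# bottom of swarms.py.
boss_node = BossNode(
    llm=llm,
    vectorstore=vectorstore,
    task_execution_chain=agent_executor,
    verbose=True,
    max_iterations=5,
)
task = boss_node.create_task(objective="Write a research paper on the impact of climate change on global agriculture")
boss_node.execute_task(task)

The same `AutoGPT.from_llm_and_tools(...)` pattern shown in the swarms.py hunk is what auto_agent.py uses for its own `agent_worker`; this commit drops the `worker_agent` re-export in favor of building the AutoGPT worker directly inside swarms.py.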