From 7fb1374238ba7152e24fc1ad6e38ef06551b628d Mon Sep 17 00:00:00 2001
From: Kye
Date: Mon, 26 Jun 2023 18:58:01 -0400
Subject: [PATCH] baby agi agent

---
 swarms/agents/README.MD     |  7 ++++
 swarms/agents/auto_agent.py |  1 -
 swarms/agents/babyagi.py    | 84 +++++++++++++++++++++++++++++++++++++
 3 files changed, 91 insertions(+), 1 deletion(-)
 create mode 100644 swarms/agents/babyagi.py

diff --git a/swarms/agents/README.MD b/swarms/agents/README.MD
index 527a46ce..2283e91e 100644
--- a/swarms/agents/README.MD
+++ b/swarms/agents/README.MD
@@ -117,3 +117,10 @@ def evaluate_results(CompAs):
 ```
 Note: In the real world, the complexity of the architecture and requirements will significantly exceed what is presented here. These examples provide a basic starting point but should be expanded upon based on the specifics of the task or problem you're trying to solve.
 
+
+
+
+# Swarms
+
+BabyAGI -> Autogpt's -> tools -> other agents
+ 
\ No newline at end of file
diff --git a/swarms/agents/auto_agent.py b/swarms/agents/auto_agent.py
index 0c036c75..7c56bb2b 100644
--- a/swarms/agents/auto_agent.py
+++ b/swarms/agents/auto_agent.py
@@ -188,7 +188,6 @@ agent = AutoGPT.from_llm_and_tools(
     tools=tools,
     llm=llm,
     memory=vectorstore.as_retriever(search_kwargs={"k": 8}),
-    # prompt=""
     human_in_the_loop=True,  # Set to True if you want to add feedback at each step.
 )
 
diff --git a/swarms/agents/babyagi.py b/swarms/agents/babyagi.py
new file mode 100644
index 00000000..e6f3f8b4
--- /dev/null
+++ b/swarms/agents/babyagi.py
@@ -0,0 +1,84 @@
+import os
+from collections import deque
+from typing import Dict, List, Optional, Any
+
+from langchain import LLMChain, OpenAI, PromptTemplate
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.llms import BaseLLM
+from langchain.vectorstores.base import VectorStore
+from pydantic import BaseModel, Field
+from langchain.chains.base import Chain
+from langchain.experimental import BabyAGI
+
+from langchain.vectorstores import FAISS
+from langchain.docstore import InMemoryDocstore
+
+from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
+from langchain import OpenAI, SerpAPIWrapper, LLMChain
+
+
+# Define your embedding model
+embeddings_model = OpenAIEmbeddings()
+# Initialize the vectorstore as empty
+import faiss
+
+embedding_size = 1536
+index = faiss.IndexFlatL2(embedding_size)
+vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
+
+
+todo_prompt = PromptTemplate.from_template(
+    "You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
+)
+todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
+search = SerpAPIWrapper()
+tools = [
+    Tool(
+        name="Search",
+        func=search.run,
+        description="useful for when you need to answer questions about current events",
+    ),
+    Tool(
+        name="TODO",
+        func=todo_chain.run,
+        description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
+    ),
+]
+
+
+prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
+suffix = """Question: {task}
+{agent_scratchpad}"""
+prompt = ZeroShotAgent.create_prompt(
+    tools,
+    prefix=prefix,
+    suffix=suffix,
+    input_variables=["objective", "task", "context", "agent_scratchpad"],
+)
+
+llm = OpenAI(temperature=0)
+llm_chain = LLMChain(llm=llm, prompt=prompt)
+tool_names = [tool.name for tool in tools]
+
+agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
+agent_executor = AgentExecutor.from_agent_and_tools(
+    agent=agent, tools=tools, verbose=True
+)
+
+OBJECTIVE = "Write a weather report for SF today"
+
+
+# Logging of LLMChains
+verbose = False
+# If None, will keep on going forever
+max_iterations: Optional[int] = 3
+baby_agi = BabyAGI.from_llm(
+    llm=llm,
+    vectorstore=vectorstore,
+    task_execution_chain=agent_executor,
+    verbose=verbose,
+    max_iterations=max_iterations,
+)
+
+baby_agi({"objective": OBJECTIVE})
+
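
A note on the new module, outside the patch itself: swarms/agents/babyagi.py is a top-level script, so everything in it (building the FAISS store, the tool-using ZeroShotAgent executor, and the BabyAGI run against the hard-coded OBJECTIVE) executes as a side effect of import. The sketch below is a minimal way to try it; the placeholder key values are assumptions you must replace, while OPENAI_API_KEY and SERPAPI_API_KEY are the environment variables LangChain's OpenAI and SerpAPIWrapper actually read.

```python
# Minimal sketch, assuming the swarms package is importable and real API keys exist.
import os

os.environ.setdefault("OPENAI_API_KEY", "sk-...")   # hypothetical placeholder, replace
os.environ.setdefault("SERPAPI_API_KEY", "...")     # hypothetical placeholder, replace

# Importing the module runs the whole BabyAGI loop defined in the patch.
import swarms.agents.babyagi  # noqa: F401
```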