From 2c00c679c3f7b97b06b9d5c6b855e8d60f7df674 Mon Sep 17 00:00:00 2001
From: Kye
Date: Wed, 5 Jul 2023 17:46:29 -0400
Subject: [PATCH] file path errors

---
 requirements.txt                      |   2 +-
 swarms/agents/boss/boss_agent.py      |   2 +-
 swarms/agents/workers/agents.py       |   5 +-
 swarms/agents/workers/worker_agent.py |   3 +-
 swarms/prompts/__init__.py            |   2 +-
 swarms/swarms.py                      | 169 +------------------------
 swarms/tools/main.py                  |  68 +++++++++++
 7 files changed, 79 insertions(+), 172 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index ab774165..0d5f28ee 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -59,7 +59,7 @@ fastapi==0.94.1
 pydantic==1.10.6
 tenacity==8.2.2
 
-python-dotenv==1.0.0
+python-dotenv
 boto3
 uvicorn==0.21.1
 
diff --git a/swarms/agents/boss/boss_agent.py b/swarms/agents/boss/boss_agent.py
index 7f25827a..6a9d1433 100644
--- a/swarms/agents/boss/boss_agent.py
+++ b/swarms/agents/boss/boss_agent.py
@@ -1,4 +1,4 @@
-
+from swarms.tools.main import *
 
 # ---------- Boss Node ----------
 class BossNode:
diff --git a/swarms/agents/workers/agents.py b/swarms/agents/workers/agents.py
index adcaf728..0dd49e15 100644
--- a/swarms/agents/workers/agents.py
+++ b/swarms/agents/workers/agents.py
@@ -137,7 +137,7 @@ class ToolsFactory:
 ################
 
 # from core.prompts.input import EVAL_PREFIX, EVAL_SUFFIX
-from prompts.prompts import EVAL_PREFIX, EVAL_SUFFIX
+from ...prompts.prompts import EVAL_PREFIX, EVAL_SUFFIX
 
 
 ############
@@ -1013,7 +1013,8 @@ from langchain.schema import BaseOutputParser
 
 
 # from core.prompts.input import EVAL_FORMAT_INSTRUCTIONS
-from prompts.prompts import EVAL_FORMAT_INSTRUCTIONS
+# from prompts.prompts import EVAL_FORMAT_INSTRUCTIONS
+from ...prompts.prompts import EVAL_FORMAT_INSTRUCTIONS
 
 
 class EvalOutputParser(BaseOutputParser):
diff --git a/swarms/agents/workers/worker_agent.py b/swarms/agents/workers/worker_agent.py
index c688af3f..b8cc7355 100644
--- a/swarms/agents/workers/worker_agent.py
+++ b/swarms/agents/workers/worker_agent.py
@@ -1,5 +1,4 @@
-
-
+from swarms.tools.main import *
 # ---------- Worker Node ----------
 # Define the input schema for the WorkerNode
 class WorkerNodeInput(BaseModel):
diff --git a/swarms/prompts/__init__.py b/swarms/prompts/__init__.py
index 08737260..66229266 100644
--- a/swarms/prompts/__init__.py
+++ b/swarms/prompts/__init__.py
@@ -1 +1 @@
-"""PROMPTS MULTI MODAL"""
\ No newline at end of file
+# """PROMPTS MULTI MODAL"""
\ No newline at end of file
diff --git a/swarms/swarms.py b/swarms/swarms.py
index 2146f687..6fe29be6 100644
--- a/swarms/swarms.py
+++ b/swarms/swarms.py
@@ -1,159 +1,6 @@
-# ---------- Dependencies ----------
-import os
-import asyncio
-import faiss
-from typing import Any, Optional, List
-from contextlib import contextmanager
-
-from pydantic import BaseModel, Field
-from langchain import LLMChain, OpenAI, PromptTemplate
-from langchain.chains.base import Chain
-
-from langchain.experimental import BabyAGI
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.vectorstores.base import VectorStore
-from langchain.vectorstores import FAISS
-
-from langchain.docstore import InMemoryDocstore
-from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
-from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
-
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.tools import BaseTool, DuckDuckGoSearchRun
-from langchain.tools.file_management.read import ReadFileTool
-
-from langchain.tools.file_management.write import WriteFileTool
-from langchain.tools.human.tool import HumanInputRun
-from swarms.tools import Terminal, CodeWriter, CodeEditor, process_csv, WebpageQATool
-
-from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
-from langchain.chat_models import ChatOpenAI
-from langchain.tools import tool
-
-
-# ---------- Constants ----------
-ROOT_DIR = "./data/"
-
-# ---------- Tools ----------
-openai_api_key = os.environ["OPENAI_API_KEY"]
-llm = ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key=openai_api_key)
-
-tools = [
-    DuckDuckGoSearchRun(),
-    WriteFileTool(root_dir=ROOT_DIR),
-    ReadFileTool(root_dir=ROOT_DIR),
-    process_csv,
-
-    WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)),
-
-    # Tool(name='terminal', func=Terminal.execute, description='Operates a terminal'),
-    # Tool(name='code_writer', func=CodeWriter(), description='Writes code'),
-    # Tool(name='code_editor', func=CodeEditor(), description='Edits code'),#
-]
-
-# ---------- Vector Store ----------
-embeddings_model = OpenAIEmbeddings()
-embedding_size = 1536
-index = faiss.IndexFlatL2(embedding_size)
-vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
-
-# ---------- Worker Node ----------
-
-# Define the input schema for the WorkerNode
-# Define the input schema for the WorkerNode
-class WorkerNodeInput(BaseModel):
-    llm: Any = Field(description="Language model")
-    tools: List[Tool] = Field(description="List of tools")
-    vectorstore: VectorStore = Field(description="Vector store")
-
-
-@tool("WorkerNode", args_schema=WorkerNodeInput)
-class WorkerNode:
-    """Useful for when you need to spawn an autonomous agent instance as a worker to accomplish complex tasks, it can search the internet or spawn child multi-modality models to process and generate images and text or audio and so on """
-    def __init__(self, llm, tools, vectorstore):
-        self.llm = llm
-        self.tools = tools
-        self.vectorstore = vectorstore
-
-    def create_agent(self, ai_name, ai_role, human_in_the_loop, search_kwargs):
-        self.agent = AutoGPT.from_llm_and_tools(
-            ai_name=ai_name,
-            ai_role=ai_role,
-            tools=tools,
-            llm=self.llm,
-            memory=self.vectorstore.as_retriever(search_kwargs=search_kwargs),
-            human_in_the_loop=human_in_the_loop,
-        )
-        self.agent.chain.verbose = True
-
-    def run_agent(self, prompt):
-        tree_of_thoughts_prompt = """
-        Imagine three different experts are answering this question. All experts will write down each chain of thought of each step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. The question is...
-        """
-        self.agent.run([f"{tree_of_thoughts_prompt} {prompt}"])
-
-
-worker_node = WorkerNode(llm=llm, tools=tools, vectorstore=vectorstore)
-
-# ---------- Boss Node ----------
-class BossNode:
-    def __init__(self, llm, vectorstore, task_execution_chain, verbose, max_iterations):
-        self.llm = llm
-        self.vectorstore = vectorstore
-        self.task_execution_chain = task_execution_chain
-        self.verbose = verbose
-        self.max_iterations = max_iterations
-
-        self.baby_agi = BabyAGI.from_llm(
-            llm=self.llm,
-            vectorstore=self.vectorstore,
-            task_execution_chain=self.task_execution_chain
-        )
-
-    def create_task(self, objective):
-        return {"objective": objective}
-
-    def execute_task(self, task):
-        self.baby_agi(task)
-
-# ---------- Inputs to Boss Node ----------
-todo_prompt = PromptTemplate.from_template(
-    "You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"""
-)
-todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
-
-tools += [
-    Tool(
-        name="TODO",
-        func=todo_chain.run,
-        description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
-    ),
-    Tool(
-        name="AUTONOMOUS Worker AGENT",
-        func=WorkerNode.run_agent,
-        description="Useful for when you need to spawn an autonomous agent instance as a worker to accomplish complex tasks, it can search the internet or spawn child multi-modality models to process and generate images and text or audio and so on"
-    )
-]
-
-suffix = """Question: {task}
-{agent_scratchpad}"""
-prefix = """You are an Boss in a swarm who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.
-"""
-
-prompt = ZeroShotAgent.create_prompt(
-    tools,
-    prefix=prefix,
-    suffix=suffix,
-    input_variables=["objective", "task", "context", "agent_scratchpad"],
-)
-
-llm = OpenAI(temperature=0)
-llm_chain = LLMChain(llm=llm, prompt=prompt)
-tool_names = [tool.name for tool in tools]
-
-agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
-agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
-boss_node = BossNode(llm=llm, vectorstore=vectorstore, task_execution_chain=agent_executor, verbose=True, max_iterations=5)
+from swarms.tools.main import *
+from swarms.agents.workers.worker_agent import WorkerNode
+from swarms.agents.boss.boss_agent import BossNode
 
 class Swarms:
     def __init__(self, openai_api_key):
@@ -161,7 +8,6 @@ class Swarms:
 
     def initialize_llm(self):
         return ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key=self.openai_api_key)
-    #
 
     def initialize_tools(self, llm):
         web_search = DuckDuckGoSearchRun()
@@ -213,14 +59,7 @@ class Swarms:
         llm = self.initialize_llm()
         tools = self.initialize_tools(llm)
         vectorstore = self.initialize_vectorstore()
-        worker_node = self.initialize_worker_node(llm, tools, vectorstore)
-        boss_node = self.initialize_boss_node(llm, vectorstore)
-        task = boss_node.create_task(objective)
-        boss_node.execute_task(task)
-        worker_node.run_agent(objective)
-
-
-
+        worker_node = self.initialize_worker
 
 
 # class Swarms:
diff --git a/swarms/tools/main.py b/swarms/tools/main.py
index c08fc731..fc993717 100644
--- a/swarms/tools/main.py
+++ b/swarms/tools/main.py
@@ -2276,3 +2276,71 @@ list_tool.run({})
 # #####==========================================================================> TOOLS
 # from langchain.tools.human.tool import HumanInputRun
 # from langchain.tools import BaseTool, DuckDuckGoSearchRun
+
+
+
+
+
+
+
+
+
+
+
+# ---------- Dependencies ----------
+import os
+import asyncio
+import faiss
+from typing import Any, Optional, List
+from contextlib import contextmanager
+
+from pydantic import BaseModel, Field
+from langchain import LLMChain, OpenAI, PromptTemplate
+from langchain.chains.base import Chain
+
+from langchain.experimental import BabyAGI
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.vectorstores.base import VectorStore
+from langchain.vectorstores import FAISS
+
+from langchain.docstore import InMemoryDocstore
+from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
+from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
+
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.tools import BaseTool, DuckDuckGoSearchRun
+from langchain.tools.file_management.read import ReadFileTool
+
+from langchain.tools.file_management.write import WriteFileTool
+from langchain.tools.human.tool import HumanInputRun
+from swarms.tools import Terminal, CodeWriter, CodeEditor, process_csv, WebpageQATool
+
+from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
+from langchain.chat_models import ChatOpenAI
+from langchain.tools import tool
+
+# ---------- Constants ----------
+ROOT_DIR = "./data/"
+
+# ---------- Tools ----------
+openai_api_key = os.environ["OPENAI_API_KEY"]
+llm = ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key=openai_api_key)
+
+tools = [
+    DuckDuckGoSearchRun(),
+    WriteFileTool(root_dir=ROOT_DIR),
+    ReadFileTool(root_dir=ROOT_DIR),
+    process_csv,
+
+    WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)),
+
+    # Tool(name='terminal', func=Terminal.execute, description='Operates a terminal'),
+    # Tool(name='code_writer', func=CodeWriter(), description='Writes code'),
+    # Tool(name='code_editor', func=CodeEditor(), description='Edits code'),#
+]
+
+# ---------- Vector Store ----------
+embeddings_model = OpenAIEmbeddings()
+embedding_size = 1536
+index = faiss.IndexFlatL2(embedding_size)
+vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
\ No newline at end of file
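
A rough usage sketch of the refactored Swarms class, built only from the pieces visible in the hunks above: the constructor that takes an OpenAI key and the initialize_llm / initialize_tools / initialize_vectorstore helpers that appear as context lines. The variable name `swarm` and the environment-variable lookup are illustrative assumptions, and the top-level method that wires the worker and boss nodes together is truncated in the last swarms.py hunk, so it is not called here.

import os

from swarms.swarms import Swarms

# Construct the orchestrator; the diff context shows __init__(self, openai_api_key).
swarm = Swarms(openai_api_key=os.environ["OPENAI_API_KEY"])

# These helpers appear in the swarms.py hunks above.
llm = swarm.initialize_llm()                  # ChatOpenAI, model_name="gpt-4", temperature=1.0, per the hunk
tools = swarm.initialize_tools(llm)           # starts from DuckDuckGoSearchRun, per the hunk context
vectorstore = swarm.initialize_vectorstore()  # called in the diff context; its body is not shown in this patch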