From c3ae804f9fe9bc3390be59225c86de688e37f262 Mon Sep 17 00:00:00 2001
From: Kye
Date: Thu, 27 Jul 2023 16:18:05 -0400
Subject: [PATCH] clean up

---
 swarms/__init__.py     |   2 +-
 swarms/agents/agent.py | 105 ++++++++++++++++++++++-------------------
 swarms/swarms.py       |  28 ++++++-----
 3 files changed, 75 insertions(+), 60 deletions(-)

diff --git a/swarms/__init__.py b/swarms/__init__.py
index fcbe3f62..6217e211 100644
--- a/swarms/__init__.py
+++ b/swarms/__init__.py
@@ -1,5 +1,5 @@
 # from swarms import Swarms, swarm
 from swarms.swarms import HierarchicalSwarm, swarm
-# from swarms.workers.worker_ultra_node import WorkerUltraNode, WorkerUltra, worker_ultra_node
+# from swarms.workers.worker_ultra_node import WorkerUltraNode, WorkerUltra
 from swarms.workers.worker_node import WorkerNode, worker_node
 from swarms.boss.boss_node import BossNode
\ No newline at end of file
diff --git a/swarms/agents/agent.py b/swarms/agents/agent.py
index ca680e60..57e5b059 100644
--- a/swarms/agents/agent.py
+++ b/swarms/agents/agent.py
@@ -16,83 +16,93 @@ from swarms.agents.models.hf import HuggingFaceLLM
 
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
+
 class AgentNodeInitializer:
-    """Useful for when you need to spawn an autonomous agent instance as a agent to accomplish complex tasks, it can search the internet or spawn child multi-modality models to process and generate images and text or audio and so on"""
+    """Useful for spawning autonomous agent instances to accomplish complex tasks."""
 
     def __init__(self,
-                 llm,
-                 tools,
-                 vectorstore,
-                 temperature,
-                 model_type: str=None,
-                 human_in_the_loop=True,
-                 model_id: str = None,
-                 embedding_size: int = 8192,
-                 system_prompt: str = None,
-                 max_iterations: int = None):
-
-
-        if not llm or not tools or not vectorstore:
-            logging.error("llm, tools, and vectorstore cannot be None.")
-            raise ValueError("llm, tools, and vectorstore cannot be None.")
-
-        self.llm = llm
-        self.tools = tools
-        self.vectorstore = vectorstore
-
-        self.agent = None
+                 llm: Optional[Any] = None,
+                 tools: Optional[List[BaseTool]] = None,
+                 vectorstore: Optional[List[Any]] = None,
+                 temperature: float = 0.5,
+                 model_type: Optional[str] = None,
+                 human_in_the_loop: bool = True,
+                 model_id: Optional[str] = None,
+                 embedding_size: int = 8192,
+                 system_prompt: Optional[str] = None,
+                 max_iterations: Optional[int] = None,
+                 agent_name: Optional[str] = None,
+                 agent_role: Optional[str] = None,
+                 verbose: bool = False,
+                 openai_api_key: Optional[str] = None):
+
+        if not openai_api_key and (model_type is None or model_type.lower() == 'openai'):
+            raise ValueError("OpenAI API key cannot be None when model_type is 'openai'")
+
+        self.llm = llm or self.initialize_llm(model_type, model_id, openai_api_key, temperature)
+        self.tools = tools or []
+        self.vectorstore = vectorstore or []
+        self.temperature = temperature
         self.model_type = model_type
-        self.human_in_the_loop = human_in_the_loop
-        self.prompt = prompt
-        self.model_id = model_id
+        self.model_id = model_id
         self.embedding_size = embedding_size
-        self.system_prompt system_prompt
-
+        self.system_prompt = system_prompt
+        self.agent_name = agent_name
+        self.agent_role = agent_role
+        self.verbose = verbose
+        self.openai_api_key = openai_api_key
+        self.agent = None
+
+        self.initialize_agent()
 
+    def initialize_llm(self, model_type: str, model_id: str, openai_api_key: str, temperature: float):
+        try:
+            if model_type.lower() == 'openai':
+                return ChatOpenAI(openai_api_key=openai_api_key, temperature=temperature)
+            elif model_type.lower() == 'huggingface':
+                return HuggingFaceLLM(model_id=model_id, temperature=temperature)
+            else:
+                raise ValueError("Invalid model_type. It should be either 'openai' or 'huggingface'")
+        except Exception as e:
+            logger.error(f"Failed to initialize language model: {e}")
+            raise e
 
-    def create_agent(self, ai_name="Swarm Agent AI Assistant", ai_role="Assistant", human_in_the_loop=True, search_kwargs={}, verbose=False):
-        logging.info("Creating agent in AgentNode")
+    def initialize_agent(self):
         try:
             self.agent = AutoGPT.from_llm_and_tools(
-                ai_name=ai_name,
-                ai_role=ai_role,
+                ai_name=self.agent_name,
+                ai_role=self.agent_role,
                 tools=self.tools,
                 llm=self.llm,
-                memory=self.vectorstore.as_retriever(search_kwargs=search_kwargs),
-                human_in_the_loop=human_in_the_loop,
+                memory=self.vectorstore.as_retriever(search_kwargs={}),
+                human_in_the_loop=self.human_in_the_loop,
                 chat_history_memory=FileChatMessageHistory("chat_history.txt"),
+                verbose=self.verbose,
             )
-            # self.agent.chain.verbose = verbose
         except Exception as e:
-            logging.error(f"Error while creating agent: {str(e)}")
+            logger.error(f"Error while creating agent: {str(e)}")
             raise e
 
-    def add_tool(self, tool: Tool):
-        if not isinstance(tool, Tool):
-            logging.error("Tool must be an instance of Tool.")
-            raise TypeError("Tool must be an instance of Tool.")
-
+    def add_tool(self, tool: BaseTool):
+        if not isinstance(tool, BaseTool):
+            logger.error("Tool must be an instance of BaseTool.")
+            raise TypeError("Tool must be an instance of BaseTool.")
         self.tools.append(tool)
 
     def run(self, prompt: str) -> str:
-        if not isinstance(prompt, str):
-            logging.error("Prompt must be a string.")
-            raise TypeError("Prompt must be a string.")
-
         if not prompt:
-            logging.error("Prompt is empty.")
+            logger.error("Prompt is empty.")
             raise ValueError("Prompt is empty.")
-
         try:
             self.agent.run([f"{prompt}"])
             return "Task completed by AgentNode"
         except Exception as e:
-            logging.error(f"While running the agent: {str(e)}")
+            logger.error(f"While running the agent: {str(e)}")
             raise e
 
@@ -121,7 +131,6 @@ class AgentNode:
         except Exception as e:
             logging.error(f"Failed to initialize language model: {e}")
 
-
    def initialize_tools(self, llm_class):
        if not llm_class:
            logging.error("llm_class not cannot be none")
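
For reference, a minimal standalone sketch of the initialization pattern the agent.py changes above move to: the constructor accepts an already-built llm and otherwise builds one lazily from model_type. The class name and the factory table below are illustrative stand-ins, not the swarms API; in a real setup the registered factories would be ChatOpenAI and HuggingFaceLLM.

from typing import Any, Callable, Dict, Optional

# Stand-in factories; a real setup would register the ChatOpenAI and
# HuggingFaceLLM constructors here instead of these placeholder lambdas.
MODEL_FACTORIES: Dict[str, Callable[[Optional[str], float], Any]] = {
    "openai": lambda model_id, temperature: f"<openai-llm temp={temperature}>",
    "huggingface": lambda model_id, temperature: f"<hf-llm {model_id} temp={temperature}>",
}


class AgentInitializerSketch:
    """Accept an LLM, or lazily build one from model_type (sketch only)."""

    def __init__(self,
                 llm: Optional[Any] = None,
                 model_type: Optional[str] = None,
                 model_id: Optional[str] = None,
                 temperature: float = 0.5):
        # Build an LLM only when the caller did not supply one.
        self.llm = llm or self.initialize_llm(model_type, model_id, temperature)

    def initialize_llm(self, model_type: Optional[str], model_id: Optional[str], temperature: float) -> Any:
        if model_type is None or model_type.lower() not in MODEL_FACTORIES:
            raise ValueError("model_type must be 'openai' or 'huggingface'")
        return MODEL_FACTORIES[model_type.lower()](model_id, temperature)


# No llm is passed here, so one is constructed from model_type.
agent = AgentInitializerSketch(model_type="huggingface", model_id="gpt2")
print(agent.llm)  # <hf-llm gpt2 temp=0.5>

Registering the constructors in a dict keeps the lookup to one line, so adding another backend does not grow the if/elif chain.
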
diff --git a/swarms/swarms.py b/swarms/swarms.py
index 2b8b5c00..9270e42a 100644
--- a/swarms/swarms.py
+++ b/swarms/swarms.py
@@ -20,13 +20,19 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
 
 # TODO: Off
 class HierarchicalSwarm:
-    def __init__(self, model_id: str = None,
-                 openai_api_key="",
-                 use_vectorstore=True, embedding_size: int = None, use_async=True,
-                 human_in_the_loop=True, model_type: str = None, boss_prompt: str = None,
-                 worker_prompt:str = None,
-                 temperature=None,
-                 max_iterations=None,
+    def __init__(self,
+                 model_id: str = None,
+                 openai_api_key="",
+                 use_vectorstore=True,
+                 embedding_size: int = None,
+                 use_async=True,
+                 human_in_the_loop=True,
+                 model_type: str = None,
+                 boss_prompt: str = None,
+                 worker_prompt:str = None,
+                 temperature=None,
+                 max_iterations=None,
+                 log_level: str = 'INFO'
                  ):
         #openai_api_key: the openai key. Default is empty
         if not model_id:
@@ -63,8 +69,8 @@ class HierarchicalSwarm:
         """
         try:
             # Initialize language model
-            if self.llm_class == 'openai' or OpenAI:
-                return llm_class(openai_api_key=self.openai_api_key, temperature=self.temperature)
+            if self.llm_class == 'openai':
+                return OpenAI(openai_api_key=self.openai_api_key, temperature=self.temperature)
             elif self.model_type == "huggingface":
                 return HuggingFaceLLM(model_id=self.model_id, temperature=self.temperature)
         except Exception as e:
@@ -109,7 +115,7 @@ class HierarchicalSwarm:
         """
         try:
             embeddings_model = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
-            embedding_size = self.embedding_size or 8192
+            embedding_size = self.embedding_size
             index = faiss.IndexFlatL2(embedding_size)
             return FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
 
@@ -232,7 +238,7 @@ def swarm(api_key="", objective="", model_type="", model_id=""):
         logging.error("Invalid objective")
         raise ValueError("A valid objective is required")
     try:
-        swarms = HierarchicalSwarm(api_key, model_id, use_async=False, model_type=model_type) # Turn off async
+        swarms = HierarchicalSwarm(api_key, model_id=model_type, use_async=False, model_type=model_type) # Turn off async
         result = swarms.run(objective)
         if result is None:
             logging.error("Failed to run swarms")
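
For context, the vectorstore hunk above follows the in-memory FAISS setup from LangChain circa 0.0.x (mid-2023). A standalone sketch of that pattern follows, with a placeholder API key and an index width of 1536 to match OpenAI's text-embedding-ada-002; with the "or 8192" fallback removed, the embedding_size handed to HierarchicalSwarm has to agree with whatever embedding model is used.

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings_model = OpenAIEmbeddings(openai_api_key="sk-...")  # placeholder key
embedding_size = 1536  # must match the embedding model's output width (ada-002 -> 1536)
index = faiss.IndexFlatL2(embedding_size)  # flat L2 index; no training step required
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
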