From e8194ede114e98e5f30fcd27006fc87bf2cee498 Mon Sep 17 00:00:00 2001
From: Kye
Date: Fri, 6 Oct 2023 12:58:47 -0400
Subject: [PATCH] clean up

Former-commit-id: 84cf977b54ee008e4d2d33913c2f3f080eb854c0
---
 swarms/agents/__init__.py  |  2 +-
 swarms/agents/aot.py       | 20 ++++++++++----------
 swarms/swarms/groupchat.py | 18 +++++++++---------
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/swarms/agents/__init__.py b/swarms/agents/__init__.py
index d23eb15c..3b0112c9 100644
--- a/swarms/agents/__init__.py
+++ b/swarms/agents/__init__.py
@@ -12,4 +12,4 @@ from swarms.agents.omni_modal_agent import OmniModalAgent
 #utils
 from swarms.agents.message import Message
 from swarms.agents.stream_response import stream
-from swarms.agents.base import AbstractAgent
\ No newline at end of file
+# from swarms.agents.base import AbstractAgent
\ No newline at end of file
diff --git a/swarms/agents/aot.py b/swarms/agents/aot.py
index 81a424a8..4eec3cb1 100644
--- a/swarms/agents/aot.py
+++ b/swarms/agents/aot.py
@@ -2,6 +2,8 @@ import logging
 import os
 import time
 
+import openai
+
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
@@ -17,7 +19,7 @@ class OpenAI:
         if api_key == "" or api_key is None:
             api_key = os.environ.get("OPENAI_API_KEY", "")
         if api_key != "":
-            llm.api_key = api_key
+            openai.api_key = api_key
         else:
             raise Exception("Please provide OpenAI API key")
 
@@ -25,7 +27,7 @@ class OpenAI:
             api_base = os.environ.get("OPENAI_API_BASE", "")  # if not set, use the default base path of "https://api.openai.com/v1"
         if api_base != "":
             # e.g. https://api.openai.com/v1/ or your custom url
-            llm.api_base = api_base
+            openai.api_base = api_base
             print(f'Using custom api_base {api_base}')
 
         if api_model == "" or api_model is None:
@@ -57,14 +59,14 @@ class OpenAI:
                            "content": prompt
                        }
                    ]
-                response = llm.ChatCompletion.create(
+                response = openai.ChatCompletion.create(
                        model=self.api_model,
                        messages=messages,
                        max_tokens=max_tokens,
                        temperature=temperature,
                    )
                else:
-                response = llm.Completion.create(
+                response = openai.Completion.create(
                        engine=self.api_model,
                        prompt=prompt,
                        n=k,
@@ -75,7 +77,7 @@
                with open("openai.logs", 'a') as log_file:
                    log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
                return response
-            except llm.error.RateLimitError as e:
+            except openai.error.RateLimitError as e:
                sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
                print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duratoin)
@@ -110,9 +112,9 @@ class OpenAI:
         rejected_solutions=None
     ):
         if (type(state) == str):
-            pass
+            state_text = state
         else:
-            '\n'.join(state)
+            state_text = '\n'.join(state)
         print("New state generating thought:", state, "\n\n")
         prompt = f"""
         Accomplish the task below by decomposing it as many very explicit subtasks as possible, be very explicit and thorough denoted by 
@@ -150,8 +152,7 @@ class OpenAI:
         while taking rejected solutions into account and learning from them. 
         Considering the reasoning provided:\n\n
         ###'{state_text}'\n\n###
-        Devise the best possible solution for the task: {initial_prompt}, 
-        Here are evaluated solutions that were rejected: 
+        Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected: 
         ###{rejected_solutions}###, 
         complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
         answer = self.generate_text(prompt, 1)
@@ -192,7 +193,6 @@ class OpenAI:
         else:
             raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
 
-
 class AoTAgent:
     def __init__(
         self,
diff --git a/swarms/swarms/groupchat.py b/swarms/swarms/groupchat.py
index 6c26c0c3..337e3bb9 100644
--- a/swarms/swarms/groupchat.py
+++ b/swarms/swarms/groupchat.py
@@ -2,13 +2,13 @@ import sys
 from dataclasses import dataclass
 from typing import Dict, List, Optional, Union
 
-from swarms.agents.base import AbstractAgent
+from swarms.workers.worker import Worker
 
 
 @dataclass
 class GroupChat:
     """A group chat with multiple participants with a list of agents and a max number of rounds"""
-    agents: List[AbstractAgent]
+    agents: List[Worker]
     messages: List[Dict]
     max_rounds: int = 10
     admin_name: str = "Admin"  #admin agent
@@ -21,11 +21,11 @@ class GroupChat:
     def reset(self):
         self.messages.clear()
 
-    def agent_by_name(self, name: str) -> AbstractAgent:
+    def agent_by_name(self, name: str) -> Worker:
         """Find the next speaker baed on the message"""
         return self.agents[self.agent_names.index(name)]
 
-    def next_agent(self, agent: AbstractAgent) -> AbstractAgent:
+    def next_agent(self, agent: Worker) -> Worker:
         """Returns the next agent in the list"""
         return self.agents[
             (self.agents_names.index(agent.ai_name) + 1) % len(self.agents)
@@ -44,8 +44,8 @@ class GroupChat:
     def select_speaker(
         self,
-        last_speaker: AbstractAgent,
-        selector: AbstractAgent,
+        last_speaker: Worker,
+        selector: Worker,
     ):
         """Selects the next speaker"""
         selector.update_system_message(self.select_speaker_msg())
 
@@ -72,7 +72,7 @@
 
 
 
-class GroupChatManager(AbstractAgent):
+class GroupChatManager(Worker):
     def __init__(
         self,
         groupchat: GroupChat,
@@ -91,7 +91,7 @@ class GroupChatManager(AbstractAgent):
             **kwargs
         )
         self.register_reply(
-            AbstractAgent,
+            Worker,
            GroupChatManager.run_chat,
            config=groupchat,
            reset_config=GroupChat.reset
@@ -100,7 +100,7 @@ class GroupChatManager(AbstractAgent):
     def run(
         self,
         messages: Optional[List[Dict]] = None,
-        sender: Optional[AbstractAgent] = None,
+        sender: Optional[Worker] = None,
         config: Optional[GroupChat] = None,
     ) -> Union[str, Dict, None]:
         #run
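
For context on the renames in swarms/agents/aot.py: the patched code targets the legacy openai Python SDK (pre-1.0), where `openai.api_key`, `openai.ChatCompletion.create`, and `openai.error.RateLimitError` are module-level attributes, so the previously undefined `llm.*` references now resolve against the imported `openai` module. Below is a minimal sketch of that call-and-retry pattern, assuming openai<1.0 is installed and OPENAI_API_KEY is set; the `chat_once` helper, model id, prompt, and token limit are illustrative placeholders, not values taken from this commit.

import os
import time

import openai  # legacy SDK (<1.0), matching the import added in aot.py

openai.api_key = os.environ.get("OPENAI_API_KEY", "")


def chat_once(prompt: str, model: str = "gpt-3.5-turbo") -> str:
    # Hypothetical helper for illustration only; the model id is a placeholder.
    while True:
        try:
            response = openai.ChatCompletion.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=64,
                temperature=0.7,
            )
            return response.choices[0].message["content"]
        except openai.error.RateLimitError:
            # Back off and retry, mirroring the OPENAI_RATE_TIMEOUT handling in aot.py.
            time.sleep(int(os.environ.get("OPENAI_RATE_TIMEOUT", 30)))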