diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml
index b3f4315e..dc713930 100644
--- a/.github/workflows/codacy.yml
+++ b/.github/workflows/codacy.yml
@@ -12,7 +12,7 @@
 # For more information on Codacy Analysis CLI in general, see
 # https://github.com/codacy/codacy-analysis-cli.

-name: Codacy Security Scan
+name: Codacy
 on:
   push:
     branches: ["master"]
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index b66aa6c3..de94af36 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -1,5 +1,5 @@
 ---
-name: Docs WorkAgent
+name: Documentation
 on:
   push:
     branches:
@@ -18,9 +18,3 @@ jobs:
       - run: pip install mkdocs-glightbox
       - run: pip install "mkdocstrings[python]"
       - run: mkdocs gh-deploy --force
-  preview:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: readthedocs/actions/preview@v1
-        with:
-          project-slug: swarms
diff --git a/.github/workflows/docs_preview.yml b/.github/workflows/docs_preview.yml
new file mode 100644
index 00000000..037fd35e
--- /dev/null
+++ b/.github/workflows/docs_preview.yml
@@ -0,0 +1,16 @@
+name: Documentation Links
+on:
+  pull_request_target:
+    types:
+      - opened
+
+permissions:
+  pull-requests: write
+
+jobs:
+  documentation-links:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: readthedocs/actions/preview@v1
+        with:
+          project-slug: "swarms"
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 47120202..a1ab3b31 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -4,11 +4,11 @@
 # You can adjust the behavior by modifying this file.
 # For more information, see:
 # https://github.com/actions/stale
-name: Mark stale issues and pull requests
+name: Stale
 on:
   schedule:
     # Scheduled to run at 1.30 UTC everyday
-    - cron: '30 1 * * *'
+    - cron: "0 0 * * *"
 jobs:
   stale:
     runs-on: ubuntu-latest
@@ -44,6 +44,6 @@ jobs:
             Please open a new pull request if you need further assistance. Thanks!
           # Label that can be assigned to issues to exclude them from being marked as stale
-          exempt-issue-labels: 'override-stale'
+          exempt-issue-labels: "override-stale"
           # Label that can be assigned to PRs to exclude them from being marked as stale
           exempt-pr-labels: "override-stale"
diff --git a/.github/workflows/welcome.yml b/.github/workflows/welcome.yml
index 0e13ef36..dd16f9c3 100644
--- a/.github/workflows/welcome.yml
+++ b/.github/workflows/welcome.yml
@@ -1,5 +1,5 @@
 ---
-name: Welcome WorkAgent
+name: Welcome
 on:
   issues:
     types: [opened]
@@ -14,7 +14,9 @@ jobs:
      - uses: actions/first-interaction@v1.3.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          issue-message: "Hello there, thank you for opening an Issue ! 🙏🏻 The team
+          issue-message:
+            "Hello there, thank you for opening an Issue! 🙏🏻 The team
             was notified and they will get back to you asap."
-          pr-message: "Hello there, thank you for opening an PR ! 🙏🏻 The team was
+          pr-message:
+            "Hello there, thank you for opening a PR! 🙏🏻 The team was
             notified and they will get back to you asap."
diff --git a/example.py b/example.py
index 6b4b8536..c254cb99 100644
--- a/example.py
+++ b/example.py
@@ -1,6 +1,6 @@
 from swarms import Agent, Anthropic

-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     agent_name="Transcript Generator",
     agent_description=(
diff --git a/playground/agents/amazon_review_agent.py b/playground/agents/amazon_review_agent.py
index 3fb3bc40..f07be9cb 100644
--- a/playground/agents/amazon_review_agent.py
+++ b/playground/agents/amazon_review_agent.py
@@ -1,6 +1,6 @@
 from swarms import Agent, OpenAIChat

-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     llm=OpenAIChat(),
     max_loops="auto",
diff --git a/playground/agents/full_stack_agent.py b/playground/agents/full_stack_agent.py
index 510f5c98..34bd69c9 100644
--- a/playground/agents/full_stack_agent.py
+++ b/playground/agents/full_stack_agent.py
@@ -10,7 +10,7 @@ def search_api(query: str, max_results: int = 10):
     return f"Search API: {query} -> {max_results} results"


-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     agent_name="Youtube Transcript Generator",
     agent_description=(
diff --git a/playground/agents/multi_modal_auto_agent_example.py b/playground/agents/multi_modal_auto_agent_example.py
index 65f8fa2b..15144257 100644
--- a/playground/agents/multi_modal_auto_agent_example.py
+++ b/playground/agents/multi_modal_auto_agent_example.py
@@ -21,7 +21,7 @@ llm = GPT4VisionAPI(
 task = "What is the color of the object?"
 img = "images/swarms.jpeg"

-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     llm=llm,
     max_loops="auto",
diff --git a/playground/demos/assembly/assembly_example.py b/playground/demos/assembly/assembly_example.py
index 7ac97ab0..e03f1d09 100644
--- a/playground/demos/assembly/assembly_example.py
+++ b/playground/demos/assembly/assembly_example.py
@@ -11,7 +11,7 @@ task = (
 )
 img = "assembly_line.jpg"

-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     llm=llm,
     max_loops=1,
diff --git a/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py b/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py
index cce61fba..fa43393e 100644
--- a/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py
+++ b/playground/demos/jarvis_multi_modal_auto_agent/jarvis_example.py
@@ -9,7 +9,7 @@ llm = GPT4VisionAPI()
 task = "What is the color of the object?"
 img = "images/swarms.jpeg"

-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     llm=llm,
     sop=MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
diff --git a/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py
index 007776ac..9840590c 100644
--- a/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py
+++ b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py
@@ -6,7 +6,7 @@ llm = GPT4VisionAPI()
 task = "What is the color of the object?"
 img = "images/swarms.jpeg"

-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     llm=llm,
     max_loops="auto",
diff --git a/playground/demos/multi_modal_chain_of_thought/vcot_example.py b/playground/demos/multi_modal_chain_of_thought/vcot_example.py
index 50a02c3d..7be417b7 100644
--- a/playground/demos/multi_modal_chain_of_thought/vcot_example.py
+++ b/playground/demos/multi_modal_chain_of_thought/vcot_example.py
@@ -22,7 +22,7 @@ llm = GPT4VisionAPI(
 task = "This is an eye test. What do you see?"
img = "playground/demos/multi_modal_chain_of_thought/eyetest.jpg" -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=llm, max_loops=2, diff --git a/playground/demos/multimodal_tot/idea2img_example.py b/playground/demos/multimodal_tot/idea2img_example.py index 4a6c1da3..85e05531 100644 --- a/playground/demos/multimodal_tot/idea2img_example.py +++ b/playground/demos/multimodal_tot/idea2img_example.py @@ -171,7 +171,7 @@ if st.button("Generate Image"): for i, (enriched_prompt, img_path, analysis) in enumerate( results ): - st.write(f"Iteration {i+1}:") + st.write(f"Iteration {i + 1}:") st.write("Enriched Prompt:", enriched_prompt) if img_path: st.image(img_path, caption="Generated Image") diff --git a/playground/demos/visuo/text_to_sql_agent_example.py b/playground/demos/visuo/text_to_sql_agent_example.py index 67f53e97..dedb8324 100644 --- a/playground/demos/visuo/text_to_sql_agent_example.py +++ b/playground/demos/visuo/text_to_sql_agent_example.py @@ -19,7 +19,7 @@ llm = HuggingfaceLLM( temperature=0.5, ) -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=llm, max_loops="auto", diff --git a/playground/examples/example_agent.py b/playground/examples/example_agent.py index 419f4622..88e2c0bd 100644 --- a/playground/examples/example_agent.py +++ b/playground/examples/example_agent.py @@ -26,7 +26,7 @@ print( f" {sys.stderr}" ) -## Initialize the workflow +# Initialize the workflow agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True) # Run the workflow on a task diff --git a/playground/examples/example_swarmnetwork.py b/playground/examples/example_swarmnetwork.py index 1675ca8b..87e5008e 100644 --- a/playground/examples/example_swarmnetwork.py +++ b/playground/examples/example_swarmnetwork.py @@ -17,7 +17,7 @@ llm = OpenAIChat( openai_api_key=api_key, ) -## Initialize the workflow +# Initialize the workflow agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager") agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager") agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager") diff --git a/playground/structs/agent_basic_customize.py b/playground/structs/agent_basic_customize.py index 76b6f178..fdab3a70 100644 --- a/playground/structs/agent_basic_customize.py +++ b/playground/structs/agent_basic_customize.py @@ -11,7 +11,7 @@ llm = OpenAIChat( # max_tokens=100, ) -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=llm, max_loops=2, diff --git a/playground/structs/agent_with_longterm_memory.py b/playground/structs/agent_with_longterm_memory.py index 588d6546..7a7b2c86 100644 --- a/playground/structs/agent_with_longterm_memory.py +++ b/playground/structs/agent_with_longterm_memory.py @@ -26,7 +26,7 @@ llm = OpenAIChat( max_tokens=1000, ) -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=llm, max_loops=4, diff --git a/playground/structs/agent_with_tools_example.py b/playground/structs/agent_with_tools_example.py index dc0dff4b..80869fcf 100644 --- a/playground/structs/agent_with_tools_example.py +++ b/playground/structs/agent_with_tools_example.py @@ -66,7 +66,7 @@ llm = OpenAIChat( ) -## Initialize the workflow +# Initialize the workflow agent = Agent( agent_name="Research Agent", llm=llm, diff --git a/playground/structs/autoscaler_example.py b/playground/structs/autoscaler_example.py index aa7cf0c0..65ba9995 100644 --- a/playground/structs/autoscaler_example.py +++ b/playground/structs/autoscaler_example.py @@ -20,7 +20,7 @@ llm = OpenAIChat( ) -## Initialize the 
workflow +# Initialize the workflow agent = Agent(llm=llm, max_loops=1, dashboard=True) diff --git a/playground/structs/basic_agent_with_azure_openai.py b/playground/structs/basic_agent_with_azure_openai.py index 76135a9f..f43b5cd2 100644 --- a/playground/structs/basic_agent_with_azure_openai.py +++ b/playground/structs/basic_agent_with_azure_openai.py @@ -1,6 +1,6 @@ from swarms import Agent, AzureOpenAI -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=AzureOpenAI(), max_loops="auto", diff --git a/playground/structs/custom_model_with_agent.py b/playground/structs/custom_model_with_agent.py index 8849fc41..521c8e21 100644 --- a/playground/structs/custom_model_with_agent.py +++ b/playground/structs/custom_model_with_agent.py @@ -10,7 +10,7 @@ class ExampleLLM(AbstractLLM): pass -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=ExampleLLM(), max_loops="auto", diff --git a/playground/structs/debate_example.py b/playground/structs/debate_example.py index 7cf0290b..6624f6bb 100644 --- a/playground/structs/debate_example.py +++ b/playground/structs/debate_example.py @@ -268,7 +268,7 @@ topic_specifier_prompt = [ Frame the debate topic as a problem to be solved. Be creative and imaginative. Please reply with the specified topic in {word_limit} words or less. - Speak directly to the presidential candidates: {*character_names,}. + Speak directly to the presidential candidates: {*character_names, }. Do not add anything else."""), ] specified_topic = ChatOpenAI(temperature=1.0)( diff --git a/playground/structs/easy_example.py b/playground/structs/easy_example.py index bebdb11a..e4f2e799 100644 --- a/playground/structs/easy_example.py +++ b/playground/structs/easy_example.py @@ -1,6 +1,6 @@ from swarms import Agent, OpenAIChat -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=OpenAIChat(), max_loops=1, diff --git a/playground/structs/swarm_network_example.py b/playground/structs/swarm_network_example.py index 6af9b340..d7fdd2ee 100644 --- a/playground/structs/swarm_network_example.py +++ b/playground/structs/swarm_network_example.py @@ -31,7 +31,7 @@ together_llm = TogetherLLM( together_api_key=os.getenv("TOGETHER_API_KEY"), max_tokens=3000 ) -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=anthropic, max_loops=1, diff --git a/playground/tools/agent_with_tools_example.py b/playground/tools/agent_with_tools_example.py index 35b61703..39544478 100644 --- a/playground/tools/agent_with_tools_example.py +++ b/playground/tools/agent_with_tools_example.py @@ -26,7 +26,7 @@ def search_api(query: str) -> str: print(f"Searching API for {query}") -## Initialize the workflow +# Initialize the workflow agent = Agent( llm=llm, max_loops=5, diff --git a/pyproject.toml b/pyproject.toml index 01174e51..1f7a5e40 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,13 +71,14 @@ pandas = "^2.2.2" fastapi = "^0.110.1" [tool.ruff] -line-length = 127 +line-length = 128 [tool.ruff.lint] -select = ["E4", "E7", "E9", "F", "W", "E501", "I", "UP"] +select = ["E", "F", "W", "I", "UP"] ignore = [] fixable = ["ALL"] unfixable = [] +preview = true [tool.black] line-length = 70 diff --git a/scripts/auto_tests_docs/auto_docs.py b/scripts/auto_tests_docs/auto_docs.py index 570793c8..233065cb 100644 --- a/scripts/auto_tests_docs/auto_docs.py +++ b/scripts/auto_tests_docs/auto_docs.py @@ -1,4 +1,4 @@ -###### VERISON2 +# VERISON2 import inspect import os import threading diff --git a/swarms/models/distilled_whisperx.py 
b/swarms/models/distilled_whisperx.py index 951dcd10..08643cac 100644 --- a/swarms/models/distilled_whisperx.py +++ b/swarms/models/distilled_whisperx.py @@ -175,7 +175,7 @@ class DistilWhisperModel: # Print the chunk's transcription print( colored( - f"Chunk {i+1}/{len(chunks)}: ", "yellow" + f"Chunk {i + 1}/{len(chunks)}: ", "yellow" ) + transcription ) diff --git a/swarms/prompts/debate.py b/swarms/prompts/debate.py index a11c7af4..197cc618 100644 --- a/swarms/prompts/debate.py +++ b/swarms/prompts/debate.py @@ -31,7 +31,7 @@ def debate_monitor(game_description, word_limit, character_names): Frame the debate topic as a problem to be solved. Be creative and imaginative. Please reply with the specified topic in {word_limit} words or less. - Speak directly to the presidential candidates: {*character_names,}. + Speak directly to the presidential candidates: {*character_names, }. Do not add anything else. """ diff --git a/swarms/prompts/schema_generator.py b/swarms/prompts/schema_generator.py index 4213d0d6..4f20d80d 100644 --- a/swarms/prompts/schema_generator.py +++ b/swarms/prompts/schema_generator.py @@ -123,7 +123,7 @@ class SchemaGenerator: return "\n".join(command_strings + [finish_string]) else: return "\n".join( - f"{i+1}. {item}" for i, item in enumerate(items) + f"{i + 1}. {item}" for i, item in enumerate(items) ) def generate_prompt_string(self) -> str: diff --git a/swarms/prompts/tools.py b/swarms/prompts/tools.py index 390966c1..45189ec4 100644 --- a/swarms/prompts/tools.py +++ b/swarms/prompts/tools.py @@ -34,7 +34,7 @@ commands: { """ -########### FEW SHOT EXAMPLES ################ +# FEW SHOT EXAMPLES ################ SCENARIOS = """ commands: { "tools": { diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 8eb594ec..d0c5edbe 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -700,7 +700,7 @@ class Agent: except Exception as e: logger.error( - f"Attempt {attempt+1}: Error generating" + f"Attempt {attempt + 1}: Error generating" f" response: {e}" ) attempt += 1 diff --git a/swarms/structs/debate.py b/swarms/structs/debate.py index 95c889d3..b98c47c5 100644 --- a/swarms/structs/debate.py +++ b/swarms/structs/debate.py @@ -237,7 +237,7 @@ class Debate: if self.mod_ans["debate_translation"] != "": break else: - print(f"===== Debate Round-{round+2} =====\n") + print(f"===== Debate Round-{round + 2} =====\n") self.affirmative.add_message_to_memory( self.save_file["debate_prompt"].replace( "##oppo_ans##", self.neg_ans diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index 23d90339..f9b83639 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -17,7 +17,8 @@ class GraphWorkflow(BaseStructure): connect(from_node, to_node): Connects two nodes in the graph. set_entry_point(node_name): Sets the entry point node for the workflow. add_edge(from_node, to_node): Adds an edge between two nodes in the graph. - add_conditional_edges(from_node, condition, edge_dict): Adds conditional edges from a node to multiple nodes based on a condition. + add_conditional_edges(from_node, condition, edge_dict): + Adds conditional edges from a node to multiple nodes based on a condition. run(): Runs the workflow and returns the graph. 

     Examples:
@@ -126,15 +127,11 @@ class GraphWorkflow(BaseStructure):
         if from_node in self.graph:
             for condition_value, to_node in edge_dict.items():
                 if to_node in self.graph:
-                    self.graph[from_node]["edges"][
-                        to_node
-                    ] = condition
+                    self.graph[from_node]["edges"][to_node] = condition
                 else:
                     raise ValueError("Node does not exist in graph")
         else:
-            raise ValueError(
-                f"Node {from_node} does not exist in graph"
-            )
+            raise ValueError(f"Node {from_node} does not exist in graph")

     def run(self):
         """
@@ -160,9 +157,7 @@ class GraphWorkflow(BaseStructure):
             ValueError: _description_
         """
         if node_name not in self.graph:
-            raise ValueError(
-                f"Node {node_name} does not exist in graph"
-            )
+            raise ValueError(f"Node {node_name} does not exist in graph")

     def _check_nodes_exist(self, from_node, to_node):
         """
diff --git a/swarms/structs/message_pool.py b/swarms/structs/message_pool.py
index 88766d06..83d1aff6 100644
--- a/swarms/structs/message_pool.py
+++ b/swarms/structs/message_pool.py
@@ -20,9 +20,7 @@ def _hash(input: str):
     return hex_dig


-def msg_hash(
-    agent: Agent, content: str, turn: int, msg_type: str = "text"
-):
+def msg_hash(agent: Agent, content: str, turn: int, msg_type: str = "text"):
     """
     Generate a hash value for a message.

@@ -37,8 +35,7 @@ def msg_hash(
     """
     time = time_ns()
     return _hash(
-        f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
-        f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
+        f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:" f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
     )


@@ -67,11 +64,17 @@ class MessagePool:
     >>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
     >>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
     >>> message_pool.get_all_messages()
-    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
+    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
+     {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
+     {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
     >>> message_pool.get_visible_messages(agent=agent1, turn=1)
-    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
+    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
+     {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
+     {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
     >>> message_pool.get_visible_messages(agent=agent2, turn=1)
-    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
+    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
+     {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
+     {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
     """

     def __init__(
         self,
@@ -98,9 +101,7 @@ class MessagePool:

         logger.info("MessagePool initialized")
         logger.info(f"Number of agents: {len(agents)}")
-        logger.info(
-            f"Agents: {[agent.agent_name for agent in agents]}"
-        )
+        logger.info(f"Agents: {[agent.agent_name for agent in agents]}")
         logger.info(f"moderator: {moderator.agent_name} is available")
         logger.info(f"Number of turns: {turns}")
@@ -187,18 +188,11 @@ class MessagePool:
             List[Dict]: The list of visible messages.
         """
         # Get the messages before the current turn
-        prev_messages = [
-            message
-            for message in self.messages
-            if message["turn"] < turn
-        ]
+        prev_messages = [message for message in self.messages if message["turn"] < turn]

         visible_messages = []
         for message in prev_messages:
-            if (
-                message["visible_to"] == "all"
-                or agent.agent_name in message["visible_to"]
-            ):
+            if message["visible_to"] == "all" or agent.agent_name in message["visible_to"]:
                 visible_messages.append(message)
         return visible_messages
diff --git a/swarms/structs/meta_system_prompt.py b/swarms/structs/meta_system_prompt.py
index 9c2061af..85a4f6c8 100644
--- a/swarms/structs/meta_system_prompt.py
+++ b/swarms/structs/meta_system_prompt.py
@@ -7,20 +7,18 @@ from swarms.prompts.meta_system_prompt import (
 )
 from swarms.structs.agent import Agent

-meta_prompter_llm = OpenAIChat(
-    system_prompt=str(meta_system_prompt_generator)
-)
+meta_prompter_llm = OpenAIChat(system_prompt=str(meta_system_prompt_generator))


-def meta_system_prompt(
-    agent: Union[Agent, AbstractLLM], system_prompt: str
-) -> str:
+def meta_system_prompt(agent: Union[Agent, AbstractLLM], system_prompt: str) -> str:
     """
     Generates a meta system prompt for the given agent using the provided system prompt.

     Args:
-        agent (Union[Agent, AbstractLLM]): The agent or LLM (Language Learning Model) for which the meta system prompt is generated.
-        system_prompt (str): The system prompt used to generate the meta system prompt.
+        agent (Union[Agent, AbstractLLM]):
+            The agent or LLM (Language Learning Model) for which the meta system prompt is generated.
+        system_prompt (str):
+            The system prompt used to generate the meta system prompt.

     Returns:
         str: The generated meta system prompt.
diff --git a/swarms/structs/model_parallizer.py b/swarms/structs/model_parallizer.py
index 9d27f14c..18840c73 100644
--- a/swarms/structs/model_parallizer.py
+++ b/swarms/structs/model_parallizer.py
@@ -88,7 +88,7 @@ class ModelParallelizer:
         """Save responses to file"""
         with open(filename, "w") as file:
             table = [
-                [f"LLM {i+1}", response]
+                [f"LLM {i + 1}", response]
                 for i, response in enumerate(self.last_responses)
             ]
             file.write(table)
@@ -111,7 +111,7 @@ class ModelParallelizer:
             print(f"{i + 1}. {task}")
         print("\nLast Responses:")
         table = [
-            [f"LLM {i+1}", response]
+            [f"LLM {i + 1}", response]
             for i, response in enumerate(self.last_responses)
         ]
         print(
diff --git a/swarms/structs/multi_threaded_workflow.py b/swarms/structs/multi_threaded_workflow.py
index 475251ba..31131f1d 100644
--- a/swarms/structs/multi_threaded_workflow.py
+++ b/swarms/structs/multi_threaded_workflow.py
@@ -120,7 +120,7 @@ class MultiThreadedWorkflow(BaseWorkflow):
             except Exception as e:
                 logging.error(
                     (
-                        f"Attempt {attempt+1} failed for task"
+                        f"Attempt {attempt + 1} failed for task"
                         f" {task}: {str(e)}"
                     ),
                     exc_info=True,
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
index 7c94f426..ceb4991d 100644
--- a/swarms/structs/sequential_workflow.py
+++ b/swarms/structs/sequential_workflow.py
@@ -186,7 +186,7 @@ class SequentialWorkflow:
         loops = 0
         while loops < self.max_loops:
             for i, agent in enumerate(self.agents):
-                logger.info(f"Agent {i+1} is executing the task.")
+                logger.info(f"Agent {i + 1} is executing the task.")
                 out = agent(self.description)
                 self.conversation.add(agent.agent_name, str(out))
                 prompt = self.conversation.return_history_as_string()
diff --git a/swarms/tools/tool_utils.py b/swarms/tools/tool_utils.py
index 2e0e5ef9..ee692461 100644
--- a/swarms/tools/tool_utils.py
+++ b/swarms/tools/tool_utils.py
@@ -17,7 +17,8 @@ def scrape_tool_func_docs(fn: Callable) -> str:
         fn (Callable): The function to scrape.

     Returns:
-        str: A string containing the function's name, documentation string, and a list of its parameters. Each parameter is represented as a line containing the parameter's name, default value, and annotation.
+        str: A string containing the function's name, documentation string, and a list of its parameters.
+            Each parameter is represented as a line containing the parameter's name, default value, and annotation.
     """
     try:
         # If the function is a tool, get the original function
@@ -34,10 +35,7 @@ def scrape_tool_func_docs(fn: Callable) -> str:
                 f" {param.annotation if param.annotation is not param.empty else 'None'}"
             )
         parameters_str = "\n".join(parameters)
-        return (
-            f"Function: {fn.__name__}\nDocstring:"
-            f" {inspect.getdoc(fn)}\nParameters:\n{parameters_str}"
-        )
+        return f"Function: {fn.__name__}\nDocstring:" f" {inspect.getdoc(fn)}\nParameters:\n{parameters_str}"
     except Exception as error:
         print(
             colored(
diff --git a/swarms/utils/load_model_torch.py b/swarms/utils/load_model_torch.py
index 53649e93..daed0557 100644
--- a/swarms/utils/load_model_torch.py
+++ b/swarms/utils/load_model_torch.py
@@ -18,7 +18,8 @@ def load_model_torch(
         model_path (str): Path to the saved model file.
         device (torch.device): Device to move the model to.
         model (nn.Module): The model architecture, if the model file only contains the state dictionary.
-        strict (bool): Whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()` function.
+        strict (bool): Whether to strictly enforce that the keys in the state dictionary match
+            the keys returned by the model's `state_dict()` function.
         map_location (callable): A function to remap the storage locations of the loaded model.
         *args: Additional arguments to pass to `torch.load`.
         **kwargs: Additional keyword arguments to pass to `torch.load`.
@@ -31,15 +32,11 @@ def load_model_torch(
         RuntimeError: If there is an error while loading the model.
""" if device is None: - device = torch.device( - "cuda" if torch.cuda.is_available() else "cpu" - ) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") try: if model is None: - model = torch.load( - model_path, map_location=map_location, *args, **kwargs - ) + model = torch.load(model_path, map_location=map_location, *args, **kwargs) else: model.load_state_dict( torch.load( diff --git a/swarms/utils/main.py b/swarms/utils/main.py index 57f94e1c..bf0b5c2b 100644 --- a/swarms/utils/main.py +++ b/swarms/utils/main.py @@ -156,7 +156,7 @@ class FileHandler: os.makedirs(os.path.dirname(local_filename), exist_ok=True) with open(local_filename, "wb") as f: size = f.write(data) - print(f"Inputs: {url} ({size//1000}MB) => {local_filename}") + print(f"Inputs: {url} ({size // 1000}MB) => {local_filename}") return local_filename def handle(self, url: str) -> str: @@ -170,7 +170,7 @@ class FileHandler: "SERVER", "http://localhost:8000" ) ) - + 1 : + + 1: ] local_filename = ( Path("file") / local_filepath.split("/")[-1] diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index 7e19a056..e30b047c 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -18,10 +18,7 @@ def llm_instance(): # Test for instantiation and attributes def test_llm_initialization(llm_instance): - assert ( - llm_instance.model_id - == "NousResearch/Nous-Hermes-2-Vision-Alpha" - ) + assert llm_instance.model_id == "NousResearch/Nous-Hermes-2-Vision-Alpha" assert llm_instance.max_length == 500 # ... add more assertions for all default attributes @@ -88,15 +85,11 @@ def test_llm_memory_consumption(llm_instance): ) def test_llm_initialization_params(model_id, max_length): if max_length: - instance = HuggingfaceLLM( - model_id=model_id, max_length=max_length - ) + instance = HuggingfaceLLM(model_id=model_id, max_length=max_length) assert instance.max_length == max_length else: instance = HuggingfaceLLM(model_id=model_id) - assert ( - instance.max_length == 500 - ) # Assuming 500 is the default max_length + assert instance.max_length == 500 # Assuming 500 is the default max_length # Test for setting an invalid device @@ -144,9 +137,7 @@ def test_llm_run_output_length(mock_run, llm_instance): # Test the tokenizer handling special tokens correctly @patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer.encode") @patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer.decode") -def test_llm_tokenizer_special_tokens( - mock_decode, mock_encode, llm_instance -): +def test_llm_tokenizer_special_tokens(mock_decode, mock_encode, llm_instance): mock_encode.return_value = "encoded input with special tokens" mock_decode.return_value = "decoded output with special tokens" result = llm_instance.run("test task with special tokens") @@ -172,9 +163,7 @@ def test_llm_response_time(mock_run, llm_instance): start_time = time.time() llm_instance.run("test task for response time") end_time = time.time() - assert ( - end_time - start_time < 1 - ) # Assuming the response should be faster than 1 second + assert end_time - start_time < 1 # Assuming the response should be faster than 1 second # Test the logging of a warning for long inputs @@ -197,13 +186,9 @@ def test_llm_run_model_exception(mock_generate, llm_instance): # Test the behavior when GPU is forced but not available @patch("torch.cuda.is_available", return_value=False) -def test_llm_force_gpu_when_unavailable( - mock_is_available, llm_instance -): +def test_llm_force_gpu_when_unavailable(mock_is_available, 
llm_instance): with pytest.raises(EnvironmentError): - llm_instance.set_device( - "cuda" - ) # Attempt to set CUDA when it's not available + llm_instance.set_device("cuda") # Attempt to set CUDA when it's not available # Test for proper cleanup after model use (releasing resources) @@ -221,9 +206,7 @@ def test_llm_multilingual_input(mock_run, llm_instance): mock_run.return_value = "mocked multilingual output" multilingual_input = "Bonjour, ceci est un test multilingue." result = llm_instance.run(multilingual_input) - assert isinstance( - result, str - ) # Simple check to ensure output is string type + assert isinstance(result, str) # Simple check to ensure output is string type # Test caching mechanism to prevent re-running the same inputs @@ -238,5 +221,7 @@ def test_llm_caching_mechanism(mock_run, llm_instance): assert first_run_result == second_run_result -# These tests are provided as examples. In real-world scenarios, you will need to adapt these tests to the actual logic of your `HuggingfaceLLM` class. -# For instance, "mock_model.delete.assert_called_once()" and similar lines are based on hypothetical methods and behaviors that you need to replace with actual implementations. +# These tests are provided as examples. +# In real-world scenarios, you will need to adapt these tests to the actual logic of your `HuggingfaceLLM` class. +# For instance, "mock_model.delete.assert_called_once()" and similar lines are based on hypothetical methods and behaviors +# that you need to replace with actual implementations.