From 9faf30f10231cdf18220d58915e4b615a431ab97 Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Tue, 28 Oct 2025 18:37:38 -0700 Subject: [PATCH 01/42] [DOCS] Token Stream in Agent class --- docs/swarms/structs/agent.md | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/docs/swarms/structs/agent.md b/docs/swarms/structs/agent.md index 8947374a..42b4f4dd 100644 --- a/docs/swarms/structs/agent.md +++ b/docs/swarms/structs/agent.md @@ -83,6 +83,7 @@ The `Agent` class establishes a conversational loop with a language model, allow | `traceback` | `Optional[Any]` | Object used for traceback handling. | | `traceback_handlers` | `Optional[Any]` | List of traceback handlers. | | `streaming_on` | `Optional[bool]` | Boolean indicating whether to stream responses. | +| `stream` | `Optional[bool]` | Boolean indicating whether to enable detailed token-by-token streaming with metadata. | | `docs` | `List[str]` | List of document paths or contents to be ingested. | | `docs_folder` | `Optional[str]` | Path to a folder containing documents to be ingested. | | `verbose` | `Optional[bool]` | Boolean indicating whether to print verbose output. | @@ -759,6 +760,22 @@ print(agent.system_prompt) ``` +### Token-by-Token Streaming + +```python +from swarms import Agent + +# Initialize agent with detailed streaming +agent = Agent( + model_name="gpt-4.1", + max_loops=1, + stream=True, # Enable detailed token-by-token streaming +) + +# Run with detailed streaming - each token shows metadata +agent.run("Tell me a short story about a robot learning to paint.") +``` + ## Agent Structured Outputs - Create a structured output schema for the agent [List[Dict]] @@ -1112,4 +1129,4 @@ The `run` method now supports several new parameters for advanced functionality: | `tool_retry_attempts` | Configure tool_retry_attempts for robust tool execution in production environments. 
| | `handoffs` | Use handoffs to create specialized agent teams that can intelligently route tasks based on complexity and expertise requirements. | -By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications. \ No newline at end of file +By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications. From 540a61ab7856b27c10fb61da0cec7afc425e35df Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Tue, 28 Oct 2025 18:38:07 -0700 Subject: [PATCH 02/42] [FEAT] Token Stream in Agent class --- swarms/structs/agent.py | 116 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 110 insertions(+), 6 deletions(-) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 489e91f9..b6950162 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -215,7 +215,8 @@ class Agent: preset_stopping_token (bool): Enable preset stopping token traceback (Any): The traceback traceback_handlers (Any): The traceback handlers - streaming_on (bool): Enable streaming + streaming_on (bool): Enable basic streaming with formatted panels + stream (bool): Enable detailed token-by-token streaming with metadata (citations, tokens used, etc.) docs (List[str]): The list of documents docs_folder (str): The folder containing the documents verbose (bool): Enable verbose mode @@ -307,9 +308,9 @@ class Agent: >>> print(response) >>> # Generate a report on the financials. 
- >>> # Real-time streaming example - >>> agent = Agent(model_name="gpt-4.1", max_loops=1, streaming_on=True) - >>> response = agent.run("Tell me a long story.") # Will stream in real-time + >>> # Detailed token streaming example + >>> agent = Agent(model_name="gpt-4.1", max_loops=1, stream=True) + >>> response = agent.run("Tell me a story.") # Will stream each token with detailed metadata >>> print(response) # Final complete response >>> # Fallback model example @@ -363,6 +364,7 @@ class Agent: traceback: Optional[Any] = None, traceback_handlers: Optional[Any] = None, streaming_on: Optional[bool] = False, + stream: Optional[bool] = False, docs: List[str] = None, docs_folder: Optional[str] = None, verbose: Optional[bool] = False, @@ -512,6 +514,7 @@ class Agent: self.traceback = traceback self.traceback_handlers = traceback_handlers self.streaming_on = streaming_on + self.stream = stream self.docs = docs self.docs_folder = docs_folder self.verbose = verbose @@ -1317,6 +1320,8 @@ class Agent: ) elif self.streaming_on: pass + elif self.stream: + pass else: self.pretty_print( response, loop_count @@ -2537,8 +2542,105 @@ class Agent: del kwargs["is_last"] try: - # Set streaming parameter in LLM if streaming is enabled - if self.streaming_on and hasattr(self.llm, "stream"): + if self.stream and hasattr(self.llm, "stream"): + original_stream = self.llm.stream + self.llm.stream = True + + if img is not None: + streaming_response = self.llm.run( + task=task, img=img, *args, **kwargs + ) + else: + streaming_response = self.llm.run( + task=task, *args, **kwargs + ) + + if hasattr(streaming_response, "__iter__") and not isinstance(streaming_response, str): + complete_response = "" + token_count = 0 + final_chunk = None + first_chunk = None + + for chunk in streaming_response: + if first_chunk is None: + first_chunk = chunk + + if hasattr(chunk, "choices") and chunk.choices[0].delta.content: + content = chunk.choices[0].delta.content + complete_response += content + 
token_count += 1 + + # Schema per token outputted + token_info = { + "token_index": token_count, + "model": getattr(chunk, 'model', self.get_current_model()), + "id": getattr(chunk, 'id', ''), + "created": getattr(chunk, 'created', int(time.time())), + "object": getattr(chunk, 'object', 'chat.completion.chunk'), + "token": content, + "system_fingerprint": getattr(chunk, 'system_fingerprint', ''), + "finish_reason": chunk.choices[0].finish_reason, + "citations": getattr(chunk, 'citations', None), + "provider_specific_fields": getattr(chunk, 'provider_specific_fields', None), + "service_tier": getattr(chunk, 'service_tier', 'default'), + "obfuscation": getattr(chunk, 'obfuscation', None), + "usage": getattr(chunk, 'usage', None), + "logprobs": chunk.choices[0].logprobs, + "timestamp": time.time() + } + + print(f"ResponseStream {token_info}") + + if streaming_callback is not None: + streaming_callback(token_info) + + final_chunk = chunk + + #Final ModelResponse to stream + if final_chunk and hasattr(final_chunk, 'usage') and final_chunk.usage: + usage = final_chunk.usage + print(f"ModelResponseStream(id='{getattr(final_chunk, 'id', 'N/A')}', " + f"created={getattr(final_chunk, 'created', 'N/A')}, " + f"model='{getattr(final_chunk, 'model', self.get_current_model())}', " + f"object='{getattr(final_chunk, 'object', 'chat.completion.chunk')}', " + f"system_fingerprint='{getattr(final_chunk, 'system_fingerprint', 'N/A')}', " + f"choices=[StreamingChoices(finish_reason='{final_chunk.choices[0].finish_reason}', " + f"index=0, delta=Delta(provider_specific_fields=None, content=None, role=None, " + f"function_call=None, tool_calls=None, audio=None), logprobs=None)], " + f"provider_specific_fields=None, " + f"usage=Usage(completion_tokens={usage.completion_tokens}, " + f"prompt_tokens={usage.prompt_tokens}, " + f"total_tokens={usage.total_tokens}, " + f"completion_tokens_details=CompletionTokensDetailsWrapper(" + 
f"accepted_prediction_tokens={usage.completion_tokens_details.accepted_prediction_tokens}, " + f"audio_tokens={usage.completion_tokens_details.audio_tokens}, " + f"reasoning_tokens={usage.completion_tokens_details.reasoning_tokens}, " + f"rejected_prediction_tokens={usage.completion_tokens_details.rejected_prediction_tokens}, " + f"text_tokens={usage.completion_tokens_details.text_tokens}), " + f"prompt_tokens_details=PromptTokensDetailsWrapper(" + f"audio_tokens={usage.prompt_tokens_details.audio_tokens}, " + f"cached_tokens={usage.prompt_tokens_details.cached_tokens}, " + f"text_tokens={usage.prompt_tokens_details.text_tokens}, " + f"image_tokens={usage.prompt_tokens_details.image_tokens})))") + else: + print(f"ModelResponseStream(id='{getattr(final_chunk, 'id', 'N/A')}', " + f"created={getattr(final_chunk, 'created', 'N/A')}, " + f"model='{getattr(final_chunk, 'model', self.get_current_model())}', " + f"object='{getattr(final_chunk, 'object', 'chat.completion.chunk')}', " + f"system_fingerprint='{getattr(final_chunk, 'system_fingerprint', 'N/A')}', " + f"choices=[StreamingChoices(finish_reason='{final_chunk.choices[0].finish_reason}', " + f"index=0, delta=Delta(provider_specific_fields=None, content=None, role=None, " + f"function_call=None, tool_calls=None, audio=None), logprobs=None)], " + f"provider_specific_fields=None)") + + + self.llm.stream = original_stream + return complete_response + else: + self.llm.stream = original_stream + return streaming_response + + elif self.streaming_on and hasattr(self.llm, "stream"): original_stream = self.llm.stream self.llm.stream = True @@ -3023,6 +3125,8 @@ class Agent: if self.streaming_on: pass + elif self.stream: + pass if self.print_on: formatter.print_panel( From 4aacf3256f7b4127af1b724af6fa1fc041f7b463 Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Tue, 28 Oct 2025 18:40:36 -0700 Subject: [PATCH 03/42] [EXAMPLE] Token Stream in Agent class --- tests/structs/test_agent_stream_token.py | 9 +++++++++ 1 file changed, 
9 insertions(+) create mode 100644 tests/structs/test_agent_stream_token.py diff --git a/tests/structs/test_agent_stream_token.py b/tests/structs/test_agent_stream_token.py new file mode 100644 index 00000000..5cd02207 --- /dev/null +++ b/tests/structs/test_agent_stream_token.py @@ -0,0 +1,9 @@ +from swarms.structs.agent import Agent + +agent = Agent( + model_name="gpt-4.1", + max_loops=1, + stream=True, +) + +agent.run("Tell me a short story about a robot learning to paint.") From e24bab738de45228dcb0ad50312768227a2d3610 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Nov 2025 11:52:15 +0000 Subject: [PATCH 04/42] Update markdown requirement from ~=3.8 to ~=3.10 Updates the requirements on [markdown](https://github.com/Python-Markdown/markdown) to permit the latest version. - [Release notes](https://github.com/Python-Markdown/markdown/releases) - [Changelog](https://github.com/Python-Markdown/markdown/blob/master/docs/changelog.md) - [Commits](https://github.com/Python-Markdown/markdown/compare/3.8...3.10.0) --- updated-dependencies: - dependency-name: markdown dependency-version: '3.10' dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 8878fb5a..4e9c01f7 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -24,7 +24,7 @@ mkdocs-autolinks-plugin # Requirements for core jinja2~=3.1 -markdown~=3.8 +markdown~=3.10 mkdocs-material-extensions~=1.3 pygments~=2.19 pymdown-extensions~=10.16 From 6741c8f9810e3d7ce7a6bcdda9d1ff9ae211a6b6 Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Fri, 14 Nov 2025 23:34:25 -0800 Subject: [PATCH 05/42] [BUG-FIX] Swarm Router Bug Fix --- docs/quickstart.md | 8 +++--- docs/swarms/examples/swarm_router.md | 39 +++++++++++++++++++++------- docs/swarms/structs/index.md | 8 +++--- swarms/structs/swarm_router.py | 34 ++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 17 deletions(-) diff --git a/docs/quickstart.md b/docs/quickstart.md index 1780748a..e04d9d57 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -290,14 +290,14 @@ task = "Write a short story about a robot who discovers music." # --- Example 1: SequentialWorkflow --- # Agents run one after another in a chain: Writer -> Editor -> Reviewer. print("Running a Sequential Workflow...") -sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents) +sequential_router = SwarmRouter(swarm_type="SequentialWorkflow", agents=agents) sequential_output = sequential_router.run(task) print(f"Final Sequential Output:\n{sequential_output}\n") # --- Example 2: ConcurrentWorkflow --- # All agents receive the same initial task and run at the same time. 
print("Running a Concurrent Workflow...") -concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents) +concurrent_router = SwarmRouter(swarm_type="ConcurrentWorkflow", agents=agents) concurrent_outputs = concurrent_router.run(task) # This returns a dictionary of each agent's output for agent_name, output in concurrent_outputs.items(): @@ -312,9 +312,9 @@ aggregator = Agent( model_name="gpt-4o-mini" ) moa_router = SwarmRouter( - swarm_type=SwarmType.MixtureOfAgents, + swarm_type="MixtureOfAgents", agents=agents, - aggregator_agent=aggregator, # MoA requires an aggregator + aggregator_agent=aggregator, ) aggregated_output = moa_router.run(task) print(f"Final Aggregated Output:\n{aggregated_output}\n") diff --git a/docs/swarms/examples/swarm_router.md b/docs/swarms/examples/swarm_router.md index 7caa875c..b404bc57 100644 --- a/docs/swarms/examples/swarm_router.md +++ b/docs/swarms/examples/swarm_router.md @@ -29,7 +29,7 @@ GROQ_API_KEY="" ```python from swarms import Agent -from swarms.structs.swarm_router import SwarmRouter, SwarmType +from swarms.structs.swarm_router import SwarmRouter # Initialize specialized agents data_extractor_agent = Agent( @@ -61,7 +61,7 @@ sequential_router = SwarmRouter( name="SequentialRouter", description="Process tasks in sequence", agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent], - swarm_type=SwarmType.SequentialWorkflow, + swarm_type="SequentialWorkflow", max_loops=1 ) @@ -76,7 +76,7 @@ concurrent_router = SwarmRouter( name="ConcurrentRouter", description="Process tasks concurrently", agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent], - swarm_type=SwarmType.ConcurrentWorkflow, + swarm_type="ConcurrentWorkflow", max_loops=1 ) @@ -91,8 +91,8 @@ rearrange_router = SwarmRouter( name="RearrangeRouter", description="Dynamically rearrange agents for optimal task processing", agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent], - 
swarm_type=SwarmType.AgentRearrange, - flow=f"{data_extractor_agent.agent_name} -> {summarizer_agent.agent_name} -> {financial_analyst_agent.agent_name}", + swarm_type="AgentRearrange", + rearrange_flow=f"{data_extractor_agent.agent_name} -> {summarizer_agent.agent_name} -> {financial_analyst_agent.agent_name}", max_loops=1 ) @@ -107,7 +107,7 @@ mixture_router = SwarmRouter( name="MixtureRouter", description="Combine multiple expert agents", agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent], - swarm_type=SwarmType.MixtureOfAgents, + swarm_type="MixtureOfAgents", max_loops=1 ) @@ -137,7 +137,7 @@ router = SwarmRouter( name="CustomRouter", description="Custom router configuration", agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent], - swarm_type=SwarmType.SequentialWorkflow, + swarm_type="SequentialWorkflow", max_loops=3, autosave=True, verbose=True, @@ -145,6 +145,27 @@ router = SwarmRouter( ) ``` +# SwarmType Reference + +## Valid SwarmType Values + +| Value | Description | +|-------|-------------| +| `"SequentialWorkflow"` | Execute agents in sequence | +| `"ConcurrentWorkflow"` | Execute agents concurrently | +| `"AgentRearrange"` | Dynamically rearrange agent execution order | +| `"MixtureOfAgents"` | Combine outputs from multiple agents | +| `"GroupChat"` | Enable group chat between agents | +| `"MultiAgentRouter"` | Route tasks to appropriate agents | +| `"AutoSwarmBuilder"` | Automatically build swarm configuration | +| `"HiearchicalSwarm"` | Hierarchical agent organization | +| `"MajorityVoting"` | Use majority voting for decisions | +| `"MALT"` | Multi-Agent Learning and Training | +| `"CouncilAsAJudge"` | Council-based evaluation system | +| `"InteractiveGroupChat"` | Interactive group chat with agents | +| `"HeavySwarm"` | Heavy swarm for complex tasks | +| `"auto"` | Automatically select swarm type | + # Best Practices ## Choose the appropriate swarm type based on your task requirements: @@ -187,7 +208,7 @@ 
Here's a complete example showing how to use SwarmRouter in a real-world scenari ```python import os from swarms import Agent -from swarms.structs.swarm_router import SwarmRouter, SwarmType +from swarms.structs.swarm_router import SwarmRouter # Initialize specialized agents research_agent = Agent( @@ -216,7 +237,7 @@ router = SwarmRouter( name="ResearchAnalysisRouter", description="Process research and analysis tasks", agents=[research_agent, analysis_agent, summary_agent], - swarm_type=SwarmType.SequentialWorkflow, + swarm_type="SequentialWorkflow", max_loops=1, verbose=True ) diff --git a/docs/swarms/structs/index.md b/docs/swarms/structs/index.md index f556ae3f..5604e372 100644 --- a/docs/swarms/structs/index.md +++ b/docs/swarms/structs/index.md @@ -186,14 +186,14 @@ task = "Write a short story about a robot who discovers music." # --- Example 1: SequentialWorkflow --- # Agents run one after another in a chain: Writer -> Editor -> Reviewer. print("Running a Sequential Workflow...") -sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents) +sequential_router = SwarmRouter(swarm_type="SequentialWorkflow", agents=agents) sequential_output = sequential_router.run(task) print(f"Final Sequential Output:\n{sequential_output}\n") # --- Example 2: ConcurrentWorkflow --- # All agents receive the same initial task and run at the same time. 
print("Running a Concurrent Workflow...") -concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents) +concurrent_router = SwarmRouter(swarm_type="ConcurrentWorkflow", agents=agents) concurrent_outputs = concurrent_router.run(task) # This returns a dictionary of each agent's output for agent_name, output in concurrent_outputs.items(): @@ -208,9 +208,9 @@ aggregator = Agent( model_name="gpt-4o-mini" ) moa_router = SwarmRouter( - swarm_type=SwarmType.MixtureOfAgents, + swarm_type="MixtureOfAgents", agents=agents, - aggregator_agent=aggregator, # MoA requires an aggregator + aggregator_agent=aggregator, ) aggregated_output = moa_router.run(task) print(f"Final Aggregated Output:\n{aggregated_output}\n") diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index b5f3fd2c..15b1500d 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -272,6 +272,40 @@ class SwarmRouter: "SwarmRouter: Swarm type cannot be 'none'. Check the docs for all the swarm types available. https://docs.swarms.world/en/latest/swarms/structs/swarm_router/" ) + # Validate swarm type is a valid string + valid_swarm_types = [ + "AgentRearrange", + "MixtureOfAgents", + "SequentialWorkflow", + "ConcurrentWorkflow", + "GroupChat", + "MultiAgentRouter", + "AutoSwarmBuilder", + "HiearchicalSwarm", + "auto", + "MajorityVoting", + "MALT", + "CouncilAsAJudge", + "InteractiveGroupChat", + "HeavySwarm", + "BatchedGridWorkflow", + ] + + if not isinstance(self.swarm_type, str): + raise SwarmRouterConfigError( + f"SwarmRouter: swarm_type must be a string, not {type(self.swarm_type).__name__}. " + f"Valid types are: {', '.join(valid_swarm_types)}. " + "Use swarm_type='SequentialWorkflow' (string), NOT SwarmType.SequentialWorkflow. 
" + "See https://docs.swarms.world/en/latest/swarms/structs/swarm_router/" + ) + + if self.swarm_type not in valid_swarm_types: + raise SwarmRouterConfigError( + f"SwarmRouter: Invalid swarm_type '{self.swarm_type}'. " + f"Valid types are: {', '.join(valid_swarm_types)}. " + "See https://docs.swarms.world/en/latest/swarms/structs/swarm_router/" + ) + if ( self.swarm_type != "HeavySwarm" and self.agents is None From 822d81fc5e303d14c032777968dbbfc97ee56c8d Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Sat, 15 Nov 2025 00:03:37 -0800 Subject: [PATCH 06/42] [BUG-FIX] Reflexion Agent task attribute error --- swarms/agents/reasoning_agents.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index be2e34fc..ea4e4b33 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -284,8 +284,12 @@ class ReasoningAgentRouter: The result of the reasoning process (format depends on agent and output_type). """ try: - swarm = self.select_swarm() - return swarm.run(task=task, *args, **kwargs) + if self.swarm_type == "ReflexionAgent": + swarm = self.select_swarm() + return swarm.run(tasks=[task], *args, **kwargs) + else: + swarm = self.select_swarm() + return swarm.run(task=task, *args, **kwargs) except Exception as e: raise ReasoningAgentExecutorError( f"ReasoningAgentRouter Error: {e} Traceback: {traceback.format_exc()} If the error persists, please check the agent's configuration and try again. 
If you would like support book a call with our team at https://cal.com/swarms" From 88eaeeda582b82253f973eaf33d1b393afc40515 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 17 Nov 2025 15:31:00 -0800 Subject: [PATCH 07/42] move examples --- .../agent_integration/x402_agent_buying.py | 20 +++++++++---------- .../agent_integration/x402_discovery_query.py | 4 +--- .../single_agent/agent_mcp.py | 0 pyproject.toml | 2 +- 4 files changed, 12 insertions(+), 14 deletions(-) rename agent_mcp.py => examples/single_agent/agent_mcp.py (100%) diff --git a/examples/guides/x402_examples/agent_integration/x402_agent_buying.py b/examples/guides/x402_examples/agent_integration/x402_agent_buying.py index e61f4466..4993125a 100644 --- a/examples/guides/x402_examples/agent_integration/x402_agent_buying.py +++ b/examples/guides/x402_examples/agent_integration/x402_agent_buying.py @@ -1,4 +1,3 @@ -from x402.client import X402Client from eth_account import Account from x402.clients.httpx import x402HttpxClient @@ -10,8 +9,7 @@ load_dotenv() async def buy_x402_service( - base_url: str = None, - endpoint: str = None + base_url: str = None, endpoint: str = None ): """ Purchase a service from the X402 bazaar using the provided affordable_service details. 
@@ -31,20 +29,22 @@ async def buy_x402_service( ```python affordable_service = {"id": "service123", "price": 90000} response = await buy_x402_service( - affordable_service, - base_url="https://api.cdp.coinbase.com", + affordable_service, + base_url="https://api.cdp.coinbase.com", endpoint="/x402/v1/bazaar/services/service123" ) print(await response.aread()) ``` """ - key = os.getenv('X402_PRIVATE_KEY') - + key = os.getenv("X402_PRIVATE_KEY") + # Set up your payment account from private key account = Account.from_key(key) - async with x402HttpxClient(account=account, base_url=base_url) as client: + async with x402HttpxClient( + account=account, base_url=base_url + ) as client: response = await client.get(endpoint) print(await response.aread()) - - return response \ No newline at end of file + + return response diff --git a/examples/guides/x402_examples/agent_integration/x402_discovery_query.py b/examples/guides/x402_examples/agent_integration/x402_discovery_query.py index c9424172..3664718f 100644 --- a/examples/guides/x402_examples/agent_integration/x402_discovery_query.py +++ b/examples/guides/x402_examples/agent_integration/x402_discovery_query.py @@ -4,7 +4,6 @@ from swarms import Agent import httpx - async def query_x402_services( limit: Optional[int] = None, max_price: Optional[int] = None, @@ -207,7 +206,6 @@ def get_x402_services_sync( return str(services) - agent = Agent( agent_name="X402-Discovery-Agent", agent_description="A agent that queries the x402 discovery services from the Coinbase CDP API.", @@ -228,4 +226,4 @@ if __name__ == "__main__": out = agent.run( task="Summarize the first 10 services under 100000 atomic units (e.g., $0.10 USDC)" ) - print(out) \ No newline at end of file + print(out) diff --git a/agent_mcp.py b/examples/single_agent/agent_mcp.py similarity index 100% rename from agent_mcp.py rename to examples/single_agent/agent_mcp.py diff --git a/pyproject.toml b/pyproject.toml index c9f3627a..d1d2e088 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.1" +version = "8.6.2" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] From e1fc052139ae91aa5f116d0b165434ebe4a0c387 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 17 Nov 2025 15:49:52 -0800 Subject: [PATCH 08/42] [Examples][ReasoningAgentRouter] --- examples/rag/README.md | 6 - examples/rag/qdrant_rag_example.py | 98 -- examples/reasoning_agents/README.md | 20 +- examples/reasoning_agents/example_o3.py | 18 - .../agent_judge_example.py | 9 + .../gkp_agent_example.py | 9 + .../ire_example.py | 11 + .../reasoning_duo_example.py | 9 + .../reflexion_agent_example.py | 10 + .../self_consistency_example.py | 10 + .../llms}/o3_agent.py | 0 .../single_agent/rag/qdrant_rag_example.py | 961 ++---------------- pyproject.toml | 2 +- swarms/agents/reasoning_agents.py | 4 +- 14 files changed, 163 insertions(+), 1004 deletions(-) delete mode 100644 examples/rag/README.md delete mode 100644 examples/rag/qdrant_rag_example.py delete mode 100644 examples/reasoning_agents/example_o3.py create mode 100644 examples/reasoning_agents/reasoning_agent_router_examples/agent_judge_example.py create mode 100644 examples/reasoning_agents/reasoning_agent_router_examples/gkp_agent_example.py create mode 100644 examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py create mode 100644 examples/reasoning_agents/reasoning_agent_router_examples/reasoning_duo_example.py create mode 100644 examples/reasoning_agents/reasoning_agent_router_examples/reflexion_agent_example.py create mode 100644 examples/reasoning_agents/reasoning_agent_router_examples/self_consistency_example.py rename examples/{reasoning_agents => single_agent/llms}/o3_agent.py (100%) diff --git a/examples/rag/README.md b/examples/rag/README.md deleted file mode 100644 index bde13960..00000000 --- a/examples/rag/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# RAG (Retrieval 
Augmented Generation) Examples - -This directory contains examples demonstrating RAG implementations and vector database integrations in Swarms. - -## Qdrant RAG -- [qdrant_rag_example.py](qdrant_rag_example.py) - Complete Qdrant RAG implementation diff --git a/examples/rag/qdrant_rag_example.py b/examples/rag/qdrant_rag_example.py deleted file mode 100644 index e9209970..00000000 --- a/examples/rag/qdrant_rag_example.py +++ /dev/null @@ -1,98 +0,0 @@ -""" -Agent with Qdrant RAG (Retrieval-Augmented Generation) - -This example demonstrates using Qdrant as a vector database for RAG operations, -allowing agents to store and retrieve documents for enhanced context. -""" - -from qdrant_client import QdrantClient, models -from swarms import Agent -from swarms_memory import QdrantDB - - -# Initialize Qdrant client -# Option 1: In-memory (for testing/development - data is not persisted) -# client = QdrantClient(":memory:") - -# Option 2: Local Qdrant server -# client = QdrantClient(host="localhost", port=6333) - -# Option 3: Qdrant Cloud (recommended for production) -import os - -client = QdrantClient( - url=os.getenv("QDRANT_URL", "https://your-cluster.qdrant.io"), - api_key=os.getenv("QDRANT_API_KEY", "your-api-key"), -) - -# Create QdrantDB wrapper for RAG operations -rag_db = QdrantDB( - client=client, - embedding_model="text-embedding-3-small", - collection_name="knowledge_base", - distance=models.Distance.COSINE, - n_results=3, -) - -# Add documents to the knowledge base -documents = [ - "Qdrant is a vector database optimized for similarity search and AI applications.", - "RAG combines retrieval and generation for more accurate AI responses.", - "Vector embeddings enable semantic search across documents.", - "The swarms framework supports multiple memory backends including Qdrant.", -] - -# Method 1: Add documents individually -for doc in documents: - rag_db.add(doc) - -# Method 2: Batch add documents (more efficient for large datasets) -# Example with metadata -# 
documents_with_metadata = [ -# "Machine learning is a subset of artificial intelligence.", -# "Deep learning uses neural networks with multiple layers.", -# "Natural language processing enables computers to understand human language.", -# "Computer vision allows machines to interpret visual information.", -# "Reinforcement learning learns through interaction with an environment." -# ] -# -# metadata = [ -# {"category": "AI", "difficulty": "beginner", "topic": "overview"}, -# {"category": "ML", "difficulty": "intermediate", "topic": "neural_networks"}, -# {"category": "NLP", "difficulty": "intermediate", "topic": "language"}, -# {"category": "CV", "difficulty": "advanced", "topic": "vision"}, -# {"category": "RL", "difficulty": "advanced", "topic": "learning"} -# ] -# -# # Batch add with metadata -# doc_ids = rag_db.batch_add(documents_with_metadata, metadata=metadata, batch_size=3) -# print(f"Added {len(doc_ids)} documents in batch") -# -# # Query with metadata return -# results_with_metadata = rag_db.query( -# "What is artificial intelligence?", -# n_results=3, -# return_metadata=True -# ) -# -# for i, result in enumerate(results_with_metadata): -# print(f"\nResult {i+1}:") -# print(f" Document: {result['document']}") -# print(f" Category: {result['category']}") -# print(f" Difficulty: {result['difficulty']}") -# print(f" Topic: {result['topic']}") -# print(f" Score: {result['score']:.4f}") - -# Create agent with RAG capabilities -agent = Agent( - agent_name="RAG-Agent", - agent_description="Agent with Qdrant-powered RAG for enhanced knowledge retrieval", - model_name="gpt-4.1", - max_loops=1, - dynamic_temperature_enabled=True, - long_term_memory=rag_db, -) - -# Query with RAG -response = agent.run("What is Qdrant and how does it relate to RAG?") -print(response) diff --git a/examples/reasoning_agents/README.md b/examples/reasoning_agents/README.md index 30292db1..8d6444c7 100644 --- a/examples/reasoning_agents/README.md +++ b/examples/reasoning_agents/README.md 
@@ -2,11 +2,25 @@ This directory contains examples demonstrating advanced reasoning capabilities and agent evaluation systems in Swarms. +## Reasoning Agent Router Examples + +The `reasoning_agent_router_examples/` folder contains simple examples for each agent type supported by the `ReasoningAgentRouter`: + +- [reasoning_duo_example.py](reasoning_agent_router_examples/reasoning_duo_example.py) - Reasoning Duo agent for collaborative reasoning +- [self_consistency_example.py](reasoning_agent_router_examples/self_consistency_example.py) - Self-Consistency agent with multiple samples +- [ire_example.py](reasoning_agent_router_examples/ire_example.py) - Iterative Reflective Expansion (IRE) agent +- [agent_judge_example.py](reasoning_agent_router_examples/agent_judge_example.py) - Agent Judge for evaluation and judgment +- [reflexion_agent_example.py](reasoning_agent_router_examples/reflexion_agent_example.py) - Reflexion agent with memory capabilities +- [gkp_agent_example.py](reasoning_agent_router_examples/gkp_agent_example.py) - Generated Knowledge Prompting (GKP) agent + ## Agent Judge Examples + +The `agent_judge_examples/` folder contains detailed examples of the AgentJudge system: + - [example1_basic_evaluation.py](agent_judge_examples/example1_basic_evaluation.py) - Basic agent evaluation - [example2_technical_evaluation.py](agent_judge_examples/example2_technical_evaluation.py) - Technical evaluation criteria - [example3_creative_evaluation.py](agent_judge_examples/example3_creative_evaluation.py) - Creative evaluation patterns -## O3 Integration -- [example_o3.py](example_o3.py) - O3 model integration example -- [o3_agent.py](o3_agent.py) - O3 agent implementation +## Self-MoA Sequential Examples + +- [moa_seq_example.py](moa_seq_example.py) - Self-MoA Sequential reasoning example for complex problem-solving diff --git a/examples/reasoning_agents/example_o3.py b/examples/reasoning_agents/example_o3.py deleted file mode 100644 index 48e01870..00000000 --- 
a/examples/reasoning_agents/example_o3.py +++ /dev/null @@ -1,18 +0,0 @@ -from swarms.utils.litellm_wrapper import LiteLLM - -# Initialize the LiteLLM wrapper with reasoning support -llm = LiteLLM( - model_name="claude-sonnet-4-20250514", # OpenAI o3 model with reasoning - reasoning_effort="low", # Enable reasoning with high effort - temperature=1, - max_tokens=2000, - stream=False, - thinking_tokens=1024, -) - -# Example task that would benefit from reasoning -task = "Solve this step-by-step: A farmer has 17 sheep and all but 9 die. How many sheep does he have left?" - -print("=== Running reasoning model ===") -response = llm.run(task) -print(response) diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/agent_judge_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/agent_judge_example.py new file mode 100644 index 00000000..64b67d21 --- /dev/null +++ b/examples/reasoning_agents/reasoning_agent_router_examples/agent_judge_example.py @@ -0,0 +1,9 @@ +from swarms.agents.reasoning_agents import ReasoningAgentRouter + +router = ReasoningAgentRouter( + swarm_type="AgentJudge", + model_name="gpt-4o-mini", + max_loops=1, +) + +result = router.run("Is Python a good programming language?") diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/gkp_agent_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/gkp_agent_example.py new file mode 100644 index 00000000..e6dbb60e --- /dev/null +++ b/examples/reasoning_agents/reasoning_agent_router_examples/gkp_agent_example.py @@ -0,0 +1,9 @@ +from swarms.agents.reasoning_agents import ReasoningAgentRouter + +router = ReasoningAgentRouter( + swarm_type="GKPAgent", + model_name="gpt-4o-mini", + num_knowledge_items=3, +) + +result = router.run("What is artificial intelligence?") diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py new file mode 100644 index 
00000000..b1f3bc00 --- /dev/null +++ b/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py @@ -0,0 +1,11 @@ +from swarms.agents.reasoning_agents import ReasoningAgentRouter + +router = ReasoningAgentRouter( + swarm_type="ire", + model_name="gpt-4o-mini", + num_samples=1, +) + +result = router.run("Explain photosynthesis in one sentence.") +print(result) + diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/reasoning_duo_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/reasoning_duo_example.py new file mode 100644 index 00000000..161fd590 --- /dev/null +++ b/examples/reasoning_agents/reasoning_agent_router_examples/reasoning_duo_example.py @@ -0,0 +1,9 @@ +from swarms.agents.reasoning_agents import ReasoningAgentRouter + +router = ReasoningAgentRouter( + swarm_type="reasoning-duo", + model_name="gpt-4o-mini", + max_loops=1, +) + +result = router.run("What is 2+2?") diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/reflexion_agent_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/reflexion_agent_example.py new file mode 100644 index 00000000..97bd8ebe --- /dev/null +++ b/examples/reasoning_agents/reasoning_agent_router_examples/reflexion_agent_example.py @@ -0,0 +1,10 @@ +from swarms.agents.reasoning_agents import ReasoningAgentRouter + +router = ReasoningAgentRouter( + swarm_type="ReflexionAgent", + model_name="gpt-4o-mini", + max_loops=1, + memory_capacity=3, +) + +result = router.run("What is machine learning?") diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/self_consistency_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/self_consistency_example.py new file mode 100644 index 00000000..1e1c394b --- /dev/null +++ b/examples/reasoning_agents/reasoning_agent_router_examples/self_consistency_example.py @@ -0,0 +1,10 @@ +from swarms.agents.reasoning_agents import ReasoningAgentRouter + +router = ReasoningAgentRouter( + 
swarm_type="self-consistency", + model_name="gpt-4o-mini", + max_loops=1, + num_samples=3, +) + +result = router.run("What is the capital of France?") diff --git a/examples/reasoning_agents/o3_agent.py b/examples/single_agent/llms/o3_agent.py similarity index 100% rename from examples/reasoning_agents/o3_agent.py rename to examples/single_agent/llms/o3_agent.py diff --git a/examples/single_agent/rag/qdrant_rag_example.py b/examples/single_agent/rag/qdrant_rag_example.py index 2b88cf4a..87caf17a 100644 --- a/examples/single_agent/rag/qdrant_rag_example.py +++ b/examples/single_agent/rag/qdrant_rag_example.py @@ -1,882 +1,91 @@ -""" -Qdrant RAG Example with Document Ingestion - -This example demonstrates how to use the agent structure from example.py with Qdrant RAG -to ingest a vast array of PDF documents and text files for advanced quantitative trading analysis. - -Features: -- Document ingestion from multiple file types (PDF, TXT, MD) -- Qdrant vector database integration -- Sentence transformer embeddings -- Comprehensive document processing pipeline -- Agent with RAG capabilities for financial analysis -""" - -import os -import uuid -from datetime import datetime -from pathlib import Path -from typing import Dict, List, Optional, Union -import concurrent.futures -from concurrent.futures import ThreadPoolExecutor - -from qdrant_client import QdrantClient -from qdrant_client.http import models -from qdrant_client.http.models import Distance, VectorParams -from sentence_transformers import SentenceTransformer - +from qdrant_client import QdrantClient, models from swarms import Agent -from swarms.utils.pdf_to_text import pdf_to_text -from swarms.utils.data_to_text import data_to_text - - -class DocumentProcessor: - """ - Handles document processing and text extraction from various file formats. - - This class provides functionality to process PDF, TXT, and Markdown files, - extracting text content for vectorization and storage in the RAG system. 
- """ - - def __init__( - self, supported_extensions: Optional[List[str]] = None - ): - """ - Initialize the DocumentProcessor. - - Args: - supported_extensions: List of supported file extensions. - Defaults to ['.pdf', '.txt', '.md'] - """ - if supported_extensions is None: - supported_extensions = [".pdf", ".txt", ".md"] - - self.supported_extensions = supported_extensions - - def process_document( - self, file_path: Union[str, Path] - ) -> Optional[Dict[str, str]]: - """ - Process a single document and extract its text content. - - Args: - file_path: Path to the document file - - Returns: - Dictionary containing document metadata and extracted text, or None if processing fails - """ - file_path = Path(file_path) - - if not file_path.exists(): - print(f"File not found: {file_path}") - return None - - if file_path.suffix.lower() not in self.supported_extensions: - print(f"Unsupported file type: {file_path.suffix}") - return None - - try: - # Extract text based on file type - if file_path.suffix.lower() == ".pdf": - try: - text_content = pdf_to_text(str(file_path)) - except Exception as pdf_error: - print(f"Error extracting PDF text: {pdf_error}") - # Fallback: try to read as text file - with open( - file_path, - "r", - encoding="utf-8", - errors="ignore", - ) as f: - text_content = f.read() - else: - try: - text_content = data_to_text(str(file_path)) - except Exception as data_error: - print(f"Error extracting text: {data_error}") - # Fallback: try to read as text file - with open( - file_path, - "r", - encoding="utf-8", - errors="ignore", - ) as f: - text_content = f.read() - - # Ensure text_content is a string - if callable(text_content): - print( - f"Warning: {file_path} returned a callable, trying to call it..." 
- ) - try: - text_content = text_content() - except Exception as call_error: - print(f"Error calling callable: {call_error}") - return None - - if not text_content or not isinstance(text_content, str): - print( - f"No valid text content extracted from: {file_path}" - ) - return None - - # Clean the text content - text_content = str(text_content).strip() - - return { - "file_path": str(file_path), - "file_name": file_path.name, - "file_type": file_path.suffix.lower(), - "text_content": text_content, - "file_size": file_path.stat().st_size, - "processed_at": datetime.utcnow().isoformat(), - } - - except Exception as e: - print(f"Error processing {file_path}: {str(e)}") - return None - - def process_directory( - self, directory_path: Union[str, Path], max_workers: int = 4 - ) -> List[Dict[str, str]]: - """ - Process all supported documents in a directory concurrently. - - Args: - directory_path: Path to the directory containing documents - max_workers: Maximum number of concurrent workers for processing - - Returns: - List of processed document dictionaries - """ - directory_path = Path(directory_path) - - if not directory_path.is_dir(): - print(f"Directory not found: {directory_path}") - return [] - - # Find all supported files - supported_files = [] - for ext in self.supported_extensions: - supported_files.extend(directory_path.rglob(f"*{ext}")) - supported_files.extend( - directory_path.rglob(f"*{ext.upper()}") - ) - - if not supported_files: - print(f"No supported files found in: {directory_path}") - return [] - - print(f"Found {len(supported_files)} files to process") - - # Process files concurrently - processed_documents = [] - with ThreadPoolExecutor(max_workers=max_workers) as executor: - future_to_file = { - executor.submit( - self.process_document, file_path - ): file_path - for file_path in supported_files - } - - for future in concurrent.futures.as_completed( - future_to_file - ): - file_path = future_to_file[future] - try: - result = future.result() - if 
result: - processed_documents.append(result) - print(f"Processed: {result['file_name']}") - except Exception as e: - print(f"Error processing {file_path}: {str(e)}") - - print( - f"Successfully processed {len(processed_documents)} documents" - ) - return processed_documents - - -class QdrantRAGMemory: - """ - Enhanced Qdrant memory system for RAG operations with document storage. - - This class extends the basic Qdrant memory system to handle document ingestion, - chunking, and semantic search for large document collections. - """ - - def __init__( - self, - collection_name: str = "document_memories", - vector_size: int = 384, # Default size for all-MiniLM-L6-v2 - url: Optional[str] = None, - api_key: Optional[str] = None, - chunk_size: int = 1000, - chunk_overlap: int = 200, - ): - """ - Initialize the Qdrant RAG memory system. - - Args: - collection_name: Name of the Qdrant collection to use - vector_size: Dimension of the embedding vectors - url: Optional Qdrant server URL (defaults to local) - api_key: Optional Qdrant API key for cloud deployment - chunk_size: Size of text chunks for processing - chunk_overlap: Overlap between consecutive chunks - """ - self.collection_name = collection_name - self.vector_size = vector_size - self.chunk_size = chunk_size - self.chunk_overlap = chunk_overlap - - # Initialize Qdrant client - if url and api_key: - self.client = QdrantClient(url=url, api_key=api_key) - else: - self.client = QdrantClient( - ":memory:" - ) # Local in-memory storage - - # Initialize embedding model - self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2") - - # Get the actual embedding dimension from the model - sample_text = "Sample text for dimension check" - sample_embedding = self.embedding_model.encode(sample_text) - actual_dimension = len(sample_embedding) - - # Update vector_size to match the actual model dimension - if actual_dimension != self.vector_size: - print( - f"Updating vector size from {self.vector_size} to {actual_dimension} 
to match model" - ) - self.vector_size = actual_dimension - - # Create collection if it doesn't exist - self._create_collection() - - def _create_collection(self): - """Create the Qdrant collection if it doesn't exist.""" - collections = self.client.get_collections().collections - exists = any( - col.name == self.collection_name for col in collections - ) - - if not exists: - self.client.create_collection( - collection_name=self.collection_name, - vectors_config=VectorParams( - size=self.vector_size, distance=Distance.COSINE - ), - ) - print( - f"Created Qdrant collection: {self.collection_name}" - ) - - def _chunk_text(self, text: str) -> List[str]: - """ - Split text into overlapping chunks for better retrieval. - - Args: - text: Text content to chunk - - Returns: - List of text chunks - """ - # Ensure text is a string - if not isinstance(text, str): - text = str(text) - - if len(text) <= self.chunk_size: - return [text] - - chunks = [] - start = 0 - - while start < len(text): - end = start + self.chunk_size - - # Try to break at sentence boundaries - if end < len(text): - # Look for sentence endings - for i in range(end, max(start, end - 100), -1): - if text[i] in ".!?": - end = i + 1 - break - - chunk = text[start:end].strip() - if chunk: - chunks.append(chunk) - - start = end - self.chunk_overlap - if start >= len(text): - break - - return chunks - - def add_document( - self, document_data: Dict[str, str] - ) -> List[str]: - """ - Add a document to the memory system with chunking. 
- - Args: - document_data: Dictionary containing document information - - Returns: - List of memory IDs for the stored chunks - """ - text_content = document_data["text_content"] - - # Ensure text_content is a string - if not isinstance(text_content, str): - print( - f"Warning: text_content is not a string: {type(text_content)}" - ) - text_content = str(text_content) - - chunks = self._chunk_text(text_content) - - memory_ids = [] - - for i, chunk in enumerate(chunks): - # Generate embedding for the chunk - embedding = self.embedding_model.encode(chunk).tolist() - - # Prepare metadata - metadata = { - "document_name": document_data["file_name"], - "document_path": document_data["file_path"], - "document_type": document_data["file_type"], - "chunk_index": i, - "total_chunks": len(chunks), - "chunk_text": chunk, - "timestamp": datetime.utcnow().isoformat(), - "file_size": document_data["file_size"], - } - - # Store the chunk - memory_id = str(uuid.uuid4()) - self.client.upsert( - collection_name=self.collection_name, - points=[ - models.PointStruct( - id=memory_id, - payload=metadata, - vector=embedding, - ) - ], - ) - - memory_ids.append(memory_id) - - print( - f"Added document '{document_data['file_name']}' with {len(chunks)} chunks" - ) - return memory_ids - - def add_documents_batch( - self, documents: List[Dict[str, str]] - ) -> List[str]: - """ - Add multiple documents to the memory system. +from swarms_memory import QdrantDB - Args: - documents: List of document dictionaries - Returns: - List of all memory IDs - """ - all_memory_ids = [] +# Initialize Qdrant client +# Option 1: In-memory (for testing/development - data is not persisted) +# client = QdrantClient(":memory:") - for document in documents: - memory_ids = self.add_document(document) - all_memory_ids.extend(memory_ids) - - return all_memory_ids - - def add(self, text: str, metadata: Optional[Dict] = None) -> str: - """ - Add a text entry to the memory system (required by Swarms interface). 
- - Args: - text: The text content to add - metadata: Optional metadata for the entry - - Returns: - str: ID of the stored memory - """ - if metadata is None: - metadata = {} - - # Generate embedding for the text - embedding = self.embedding_model.encode(text).tolist() - - # Prepare metadata - memory_metadata = { - "text": text, - "timestamp": datetime.utcnow().isoformat(), - "source": "agent_memory", - } - memory_metadata.update(metadata) - - # Store the point - memory_id = str(uuid.uuid4()) - self.client.upsert( - collection_name=self.collection_name, - points=[ - models.PointStruct( - id=memory_id, - payload=memory_metadata, - vector=embedding, - ) - ], - ) - - return memory_id - - def query( - self, - query_text: str, - limit: int = 5, - score_threshold: float = 0.7, - include_metadata: bool = True, - ) -> List[Dict]: - """ - Query memories based on text similarity. - - Args: - query_text: The text query to search for - limit: Maximum number of results to return - score_threshold: Minimum similarity score threshold - include_metadata: Whether to include metadata in results - - Returns: - List of matching memories with their metadata - """ - try: - # Check if collection has any points - collection_info = self.client.get_collection( - self.collection_name - ) - if collection_info.points_count == 0: - print( - "Warning: Collection is empty, no documents to query" - ) - return [] - - # Generate embedding for the query - query_embedding = self.embedding_model.encode( - query_text - ).tolist() - - # Search in Qdrant - results = self.client.search( - collection_name=self.collection_name, - query_vector=query_embedding, - limit=limit, - score_threshold=score_threshold, - ) - - memories = [] - for res in results: - memory = res.payload.copy() - memory["similarity_score"] = res.score - - if not include_metadata: - # Keep only essential information - memory = { - "chunk_text": memory.get("chunk_text", ""), - "document_name": memory.get( - "document_name", "" - ), - 
"similarity_score": memory[ - "similarity_score" - ], - } - - memories.append(memory) - - return memories - - except Exception as e: - print(f"Error querying collection: {e}") - return [] - - def get_collection_stats(self) -> Dict: - """ - Get statistics about the collection. - - Returns: - Dictionary containing collection statistics - """ - try: - collection_info = self.client.get_collection( - self.collection_name - ) - return { - "collection_name": self.collection_name, - "vector_size": collection_info.config.params.vectors.size, - "distance": collection_info.config.params.vectors.distance, - "points_count": collection_info.points_count, - } - except Exception as e: - print(f"Error getting collection stats: {e}") - return {} - - def clear_collection(self): - """Clear all memories from the collection.""" - self.client.delete_collection(self.collection_name) - self._create_collection() - print(f"Cleared collection: {self.collection_name}") - - -class QuantitativeTradingRAGAgent: - """ - Advanced quantitative trading agent with RAG capabilities for document analysis. - - This agent combines the structure from example.py with Qdrant RAG to provide - comprehensive financial analysis based on ingested documents. - """ - - def __init__( - self, - agent_name: str = "Quantitative-Trading-RAG-Agent", - collection_name: str = "financial_documents", - qdrant_url: Optional[str] = None, - qdrant_api_key: Optional[str] = None, - model_name: str = "claude-sonnet-4-20250514", - max_loops: int = 1, - chunk_size: int = 1000, - chunk_overlap: int = 200, - ): - """ - Initialize the Quantitative Trading RAG Agent. 
- - Args: - agent_name: Name of the agent - collection_name: Name of the Qdrant collection - qdrant_url: Optional Qdrant server URL - qdrant_api_key: Optional Qdrant API key - model_name: LLM model to use - max_loops: Maximum number of agent loops - chunk_size: Size of text chunks for processing - chunk_overlap: Overlap between consecutive chunks - """ - self.agent_name = agent_name - self.collection_name = collection_name - - # Initialize document processor - self.document_processor = DocumentProcessor() - - # Initialize Qdrant RAG memory - self.rag_memory = QdrantRAGMemory( - collection_name=collection_name, - url=qdrant_url, - api_key=qdrant_api_key, - chunk_size=chunk_size, - chunk_overlap=chunk_overlap, - ) - - # Initialize the agent with RAG capabilities - self.agent = Agent( - agent_name=agent_name, - agent_description="Advanced quantitative trading and algorithmic analysis agent with RAG capabilities", - system_prompt="""You are an expert quantitative trading agent with deep expertise in: - - Algorithmic trading strategies and implementation - - Statistical arbitrage and market making - - Risk management and portfolio optimization - - High-frequency trading systems - - Market microstructure analysis - - Quantitative research methodologies - - Financial mathematics and stochastic processes - - Machine learning applications in trading - - Your core responsibilities include: - 1. Developing and backtesting trading strategies - 2. Analyzing market data and identifying alpha opportunities - 3. Implementing risk management frameworks - 4. Optimizing portfolio allocations - 5. Conducting quantitative research - 6. Monitoring market microstructure - 7. Evaluating trading system performance - - You have access to a comprehensive document database through RAG (Retrieval-Augmented Generation). - When answering questions, you can search through this database to find relevant information - and provide evidence-based responses. 
- - You maintain strict adherence to: - - Mathematical rigor in all analyses - - Statistical significance in strategy development - - Risk-adjusted return optimization - - Market impact minimization - - Regulatory compliance - - Transaction cost analysis - - Performance attribution - - You communicate in precise, technical terms while maintaining clarity for stakeholders.""", - model_name=model_name, - dynamic_temperature_enabled=True, - output_type="str-all-except-first", - max_loops=max_loops, - dynamic_context_window=True, - long_term_memory=self.rag_memory, - ) - - def ingest_documents( - self, documents_path: Union[str, Path] - ) -> int: - """ - Ingest documents from a directory into the RAG system. - - Args: - documents_path: Path to directory containing documents - - Returns: - Number of documents successfully ingested - """ - print(f"Starting document ingestion from: {documents_path}") - - try: - # Process documents - processed_documents = ( - self.document_processor.process_directory( - documents_path - ) - ) - - if not processed_documents: - print("No documents to ingest") - return 0 - - # Add documents to RAG memory - memory_ids = self.rag_memory.add_documents_batch( - processed_documents - ) - - print( - f"Successfully ingested {len(processed_documents)} documents" - ) - print(f"Created {len(memory_ids)} memory chunks") - - return len(processed_documents) - - except Exception as e: - print(f"Error during document ingestion: {e}") - import traceback - - traceback.print_exc() - return 0 - - def query_documents( - self, query: str, limit: int = 5 - ) -> List[Dict]: - """ - Query the document database for relevant information. - - Args: - query: The query text - limit: Maximum number of results to return - - Returns: - List of relevant document chunks - """ - return self.rag_memory.query(query, limit=limit) - - def run_analysis(self, task: str) -> str: - """ - Run a financial analysis task using the agent with RAG capabilities. 
- - Args: - task: The analysis task to perform - - Returns: - Agent's response to the task - """ - print(f"Running analysis task: {task}") - - # First, query the document database for relevant context - relevant_docs = self.query_documents(task, limit=3) - - if relevant_docs: - # Enhance the task with relevant document context - context = "\n\nRelevant Document Information:\n" - for i, doc in enumerate(relevant_docs, 1): - context += f"\nDocument {i}: {doc.get('document_name', 'Unknown')}\n" - context += f"Relevance Score: {doc.get('similarity_score', 0):.3f}\n" - context += ( - f"Content: {doc.get('chunk_text', '')[:500]}...\n" - ) - - enhanced_task = f"{task}\n\n{context}" - else: - enhanced_task = task - - # Run the agent - response = self.agent.run(enhanced_task) - return response - - def get_database_stats(self) -> Dict: - """ - Get statistics about the document database. - - Returns: - Dictionary containing database statistics - """ - return self.rag_memory.get_collection_stats() - - -def main(): - """ - Main function demonstrating the Qdrant RAG agent with document ingestion. 
- """ - from datetime import datetime - - # Example usage - print("πŸš€ Initializing Quantitative Trading RAG Agent...") - - # Initialize the agent (you can set environment variables for Qdrant cloud) - agent = QuantitativeTradingRAGAgent( - agent_name="Quantitative-Trading-RAG-Agent", - collection_name="financial_documents", - qdrant_url=os.getenv( - "QDRANT_URL" - ), # Optional: For cloud deployment - qdrant_api_key=os.getenv( - "QDRANT_API_KEY" - ), # Optional: For cloud deployment - model_name="claude-sonnet-4-20250514", - max_loops=1, - chunk_size=1000, - chunk_overlap=200, - ) - - # Example: Ingest documents from a directory - documents_path = "documents" # Path to your documents - if os.path.exists(documents_path): - print(f"Found documents directory: {documents_path}") - try: - agent.ingest_documents(documents_path) - except Exception as e: - print(f"Error ingesting documents: {e}") - print("Continuing without document ingestion...") - else: - print(f"Documents directory not found: {documents_path}") - print("Creating a sample document for demonstration...") - - # Create a sample document - try: - sample_doc = { - "file_path": "sample_financial_analysis.txt", - "file_name": "sample_financial_analysis.txt", - "file_type": ".txt", - "text_content": """ - Gold ETFs: A Comprehensive Investment Guide - - Gold ETFs (Exchange-Traded Funds) provide investors with exposure to gold prices - without the need to physically store the precious metal. These funds track the - price of gold and offer several advantages including liquidity, diversification, - and ease of trading. - - Top Gold ETFs include: - 1. SPDR Gold Shares (GLD) - Largest gold ETF with high liquidity - 2. iShares Gold Trust (IAU) - Lower expense ratio alternative - 3. 
Aberdeen Standard Physical Gold ETF (SGOL) - Swiss storage option - - Investment strategies for gold ETFs: - - Portfolio diversification (5-10% allocation) - - Inflation hedge - - Safe haven during market volatility - - Tactical trading opportunities - - Market analysis shows that gold has historically served as a store of value - and hedge against inflation. Recent market conditions have increased interest - in gold investments due to economic uncertainty and geopolitical tensions. - """, - "file_size": 1024, - "processed_at": datetime.utcnow().isoformat(), - } - - # Add the sample document to the RAG memory - memory_ids = agent.rag_memory.add_document(sample_doc) - print( - f"Added sample document with {len(memory_ids)} chunks" - ) - - except Exception as e: - print(f"Error creating sample document: {e}") - print("Continuing without sample document...") - - # Example: Query the database - print("\nπŸ“Š Querying document database...") - try: - query_results = agent.query_documents( - "gold ETFs investment strategies", limit=3 - ) - print(f"Found {len(query_results)} relevant document chunks") - - if query_results: - print("Sample results:") - for i, result in enumerate(query_results[:2], 1): - print( - f" {i}. {result.get('document_name', 'Unknown')} (Score: {result.get('similarity_score', 0):.3f})" - ) - else: - print( - "No documents found in database. This is expected if no documents were ingested." - ) - except Exception as e: - print(f"❌ Query failed: {e}") - - # Example: Run financial analysis - print("\nπŸ’Ή Running financial analysis...") - analysis_task = "What are the best top 3 ETFs for gold coverage and what are their key characteristics?" 
- try: - response = agent.run_analysis(analysis_task) - print("\nπŸ“ˆ Analysis Results:") - print(response) - except Exception as e: - print(f"❌ Analysis failed: {e}") - print("This might be due to API key or model access issues.") - print("Continuing with database statistics...") - - # Try a simpler query that doesn't require the LLM - print("\nπŸ” Trying simple document query instead...") - try: - simple_results = agent.query_documents( - "what do you see in the document?", limit=2 - ) - if simple_results: - print("Simple query results:") - for i, result in enumerate(simple_results, 1): - print( - f" {i}. {result.get('document_name', 'Unknown')}" - ) - print( - f" Content preview: {result.get('chunk_text', '')[:100]}..." - ) - else: - print("No results from simple query") - except Exception as simple_error: - print(f"Simple query also failed: {simple_error}") - - # Get database statistics - print("\nπŸ“Š Database Statistics:") - try: - stats = agent.get_database_stats() - for key, value in stats.items(): - print(f" {key}: {value}") - except Exception as e: - print(f"❌ Failed to get database statistics: {e}") - - print("\nβœ… Example completed successfully!") - print("πŸ’‘ To test with your own documents:") - print(" 1. Create a 'documents' directory") - print(" 2. Add PDF, TXT, or MD files") - print(" 3. 
Run the script again") +# Option 2: Local Qdrant server +# client = QdrantClient(host="localhost", port=6333) +# Option 3: Qdrant Cloud (recommended for production) +import os -if __name__ == "__main__": - main() +client = QdrantClient( + url=os.getenv("QDRANT_URL", "https://your-cluster.qdrant.io"), + api_key=os.getenv("QDRANT_API_KEY", "your-api-key"), +) + +# Create QdrantDB wrapper for RAG operations +rag_db = QdrantDB( + client=client, + embedding_model="text-embedding-3-small", + collection_name="knowledge_base", + distance=models.Distance.COSINE, + n_results=3, +) + +# Add documents to the knowledge base +documents = [ + "Qdrant is a vector database optimized for similarity search and AI applications.", + "RAG combines retrieval and generation for more accurate AI responses.", + "Vector embeddings enable semantic search across documents.", + "The swarms framework supports multiple memory backends including Qdrant.", +] + +# Method 1: Add documents individually +for doc in documents: + rag_db.add(doc) + +# Method 2: Batch add documents (more efficient for large datasets) +# Example with metadata +# documents_with_metadata = [ +# "Machine learning is a subset of artificial intelligence.", +# "Deep learning uses neural networks with multiple layers.", +# "Natural language processing enables computers to understand human language.", +# "Computer vision allows machines to interpret visual information.", +# "Reinforcement learning learns through interaction with an environment." 
+# ] +# +# metadata = [ +# {"category": "AI", "difficulty": "beginner", "topic": "overview"}, +# {"category": "ML", "difficulty": "intermediate", "topic": "neural_networks"}, +# {"category": "NLP", "difficulty": "intermediate", "topic": "language"}, +# {"category": "CV", "difficulty": "advanced", "topic": "vision"}, +# {"category": "RL", "difficulty": "advanced", "topic": "learning"} +# ] +# +# # Batch add with metadata +# doc_ids = rag_db.batch_add(documents_with_metadata, metadata=metadata, batch_size=3) +# print(f"Added {len(doc_ids)} documents in batch") +# +# # Query with metadata return +# results_with_metadata = rag_db.query( +# "What is artificial intelligence?", +# n_results=3, +# return_metadata=True +# ) +# +# for i, result in enumerate(results_with_metadata): +# print(f"\nResult {i+1}:") +# print(f" Document: {result['document']}") +# print(f" Category: {result['category']}") +# print(f" Difficulty: {result['difficulty']}") +# print(f" Topic: {result['topic']}") +# print(f" Score: {result['score']:.4f}") + +# Create agent with RAG capabilities +agent = Agent( + agent_name="RAG-Agent", + agent_description="Agent with Qdrant-powered RAG for enhanced knowledge retrieval", + model_name="gpt-4.1", + max_loops=1, + dynamic_temperature_enabled=True, + long_term_memory=rag_db, +) + +# Query with RAG +response = agent.run("What is Qdrant and how does it relate to RAG?") +print(response) diff --git a/pyproject.toml b/pyproject.toml index d1d2e088..10ad1565 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.2" +version = "8.6.3" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index ea4e4b33..122ccb01 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -284,11 +284,11 @@ class ReasoningAgentRouter: The result of the 
reasoning process (format depends on agent and output_type). """ try: + swarm = self.select_swarm() + if self.swarm_type == "ReflexionAgent": - swarm = self.select_swarm() return swarm.run(tasks=[task], *args, **kwargs) else: - swarm = self.select_swarm() return swarm.run(task=task, *args, **kwargs) except Exception as e: raise ReasoningAgentExecutorError( From 73ed6279b5bc426847550a0338381ad8002fb429 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 17 Nov 2025 16:10:53 -0800 Subject: [PATCH 09/42] swarm router majority voting and readme --- README.md | 2 +- swarm_router_mv.py | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 swarm_router_mv.py diff --git a/README.md b/README.md index fa70d1dd..6423140f 100644 --- a/README.md +++ b/README.md @@ -833,7 +833,7 @@ Thank you for contributing to swarms. Your work is extremely appreciated and rec ----- -## Connect With Us +## Join the Swarms community Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights! 
diff --git a/swarm_router_mv.py b/swarm_router_mv.py new file mode 100644 index 00000000..dd691b78 --- /dev/null +++ b/swarm_router_mv.py @@ -0,0 +1,40 @@ +from swarms import SwarmRouter, Agent + +# Create specialized agents +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in comprehensive research and data gathering", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +router = SwarmRouter( + name="SwarmRouter", + description="Routes tasks to specialized agents based on their capabilities", + agents=[research_agent, analysis_agent, strategy_agent], + swarm_type="MajorityVoting", + max_loops=1, + verbose=False, +) + +result = router.run( + "Conduct a research analysis on water stocks and etfs" +) +print(result) \ No newline at end of file From e721620cc8da1ee71010a1537463a1874e8f94bd Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 17 Nov 2025 16:11:08 -0800 Subject: [PATCH 10/42] swarm router majority voting and readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6423140f..89e713ec 100644 --- a/README.md +++ b/README.md @@ -833,7 +833,7 @@ Thank you for contributing to swarms. Your work is extremely appreciated and rec ----- -## Join the Swarms community +## Join the Swarms community πŸ‘ΎπŸ‘ΎπŸ‘Ύ Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights! 
From 3db9378312c6687ca32cb4636c0519cb56cdb96b Mon Sep 17 00:00:00 2001 From: Hugh <155223694+hughiwnl@users.noreply.github.com> Date: Mon, 17 Nov 2025 16:53:52 -0800 Subject: [PATCH 11/42] added tests for custom_agent.py --- tests/structs/test_custom_agent.py | 370 +++++++++++++++++++++++++++++ 1 file changed, 370 insertions(+) create mode 100644 tests/structs/test_custom_agent.py diff --git a/tests/structs/test_custom_agent.py b/tests/structs/test_custom_agent.py new file mode 100644 index 00000000..3cdeda25 --- /dev/null +++ b/tests/structs/test_custom_agent.py @@ -0,0 +1,370 @@ +import pytest +import json +from unittest.mock import Mock, patch, AsyncMock +from loguru import logger +from swarms.structs.custom_agent import CustomAgent, AgentResponse + +try: + import pytest_asyncio + ASYNC_AVAILABLE = True +except ImportError: + ASYNC_AVAILABLE = False + pytest_asyncio = None + + +def create_test_custom_agent(): + return CustomAgent( + name="TestAgent", + description="Test agent for unit testing", + base_url="https://api.test.com", + endpoint="v1/test", + headers={"Authorization": "Bearer test-token"}, + timeout=10.0, + verify_ssl=True, + ) + + +@pytest.fixture +def sample_custom_agent(): + return create_test_custom_agent() + + +def test_custom_agent_initialization(): + try: + custom_agent_instance = CustomAgent( + name="TestAgent", + description="Test description", + base_url="https://api.example.com", + endpoint="v1/endpoint", + headers={"Content-Type": "application/json"}, + timeout=30.0, + verify_ssl=True, + ) + assert custom_agent_instance.base_url == "https://api.example.com" + assert custom_agent_instance.endpoint == "v1/endpoint" + assert custom_agent_instance.timeout == 30.0 + assert custom_agent_instance.verify_ssl is True + assert "Content-Type" in custom_agent_instance.default_headers + logger.info("CustomAgent initialized successfully") + except Exception as e: + logger.error(f"Failed to initialize CustomAgent: {e}") + raise + + +def 
test_custom_agent_initialization_with_default_headers(sample_custom_agent): + try: + custom_agent_no_headers = CustomAgent( + name="TestAgent", + description="Test", + base_url="https://api.test.com", + endpoint="test", + ) + assert "Content-Type" in custom_agent_no_headers.default_headers + assert ( + custom_agent_no_headers.default_headers["Content-Type"] + == "application/json" + ) + logger.debug("Default Content-Type header added correctly") + except Exception as e: + logger.error(f"Failed to test default headers: {e}") + raise + + +def test_custom_agent_url_normalization(): + try: + custom_agent_with_slashes = CustomAgent( + name="TestAgent", + description="Test", + base_url="https://api.test.com/", + endpoint="/v1/test", + ) + assert custom_agent_with_slashes.base_url == "https://api.test.com" + assert custom_agent_with_slashes.endpoint == "v1/test" + logger.debug("URL normalization works correctly") + except Exception as e: + logger.error(f"Failed to test URL normalization: {e}") + raise + + +def test_prepare_headers(sample_custom_agent): + try: + prepared_headers = sample_custom_agent._prepare_headers() + assert "Authorization" in prepared_headers + assert prepared_headers["Authorization"] == "Bearer test-token" + + additional_headers = {"X-Custom-Header": "custom-value"} + prepared_headers_with_additional = ( + sample_custom_agent._prepare_headers(additional_headers) + ) + assert prepared_headers_with_additional["X-Custom-Header"] == "custom-value" + assert prepared_headers_with_additional["Authorization"] == "Bearer test-token" + logger.debug("Header preparation works correctly") + except Exception as e: + logger.error(f"Failed to test prepare_headers: {e}") + raise + + +def test_prepare_payload_dict(sample_custom_agent): + try: + payload_dict = {"key": "value", "number": 123} + prepared_payload = sample_custom_agent._prepare_payload(payload_dict) + assert isinstance(prepared_payload, str) + parsed = json.loads(prepared_payload) + assert parsed["key"] == 
"value" + assert parsed["number"] == 123 + logger.debug("Dictionary payload prepared correctly") + except Exception as e: + logger.error(f"Failed to test prepare_payload with dict: {e}") + raise + + +def test_prepare_payload_string(sample_custom_agent): + try: + payload_string = '{"test": "value"}' + prepared_payload = sample_custom_agent._prepare_payload(payload_string) + assert prepared_payload == payload_string + logger.debug("String payload prepared correctly") + except Exception as e: + logger.error(f"Failed to test prepare_payload with string: {e}") + raise + + +def test_prepare_payload_bytes(sample_custom_agent): + try: + payload_bytes = b'{"test": "value"}' + prepared_payload = sample_custom_agent._prepare_payload(payload_bytes) + assert prepared_payload == payload_bytes + logger.debug("Bytes payload prepared correctly") + except Exception as e: + logger.error(f"Failed to test prepare_payload with bytes: {e}") + raise + + +def test_parse_response_success(sample_custom_agent): + try: + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = '{"message": "success"}' + mock_response.headers = {"content-type": "application/json"} + mock_response.json.return_value = {"message": "success"} + + parsed_response = sample_custom_agent._parse_response(mock_response) + assert isinstance(parsed_response, AgentResponse) + assert parsed_response.status_code == 200 + assert parsed_response.success is True + assert parsed_response.json_data == {"message": "success"} + assert parsed_response.error_message is None + logger.debug("Successful response parsed correctly") + except Exception as e: + logger.error(f"Failed to test parse_response success: {e}") + raise + + +def test_parse_response_error(sample_custom_agent): + try: + mock_response = Mock() + mock_response.status_code = 404 + mock_response.text = "Not Found" + mock_response.headers = {"content-type": "text/plain"} + + parsed_response = sample_custom_agent._parse_response(mock_response) + assert 
isinstance(parsed_response, AgentResponse) + assert parsed_response.status_code == 404 + assert parsed_response.success is False + assert parsed_response.error_message == "HTTP 404" + logger.debug("Error response parsed correctly") + except Exception as e: + logger.error(f"Failed to test parse_response error: {e}") + raise + + +def test_extract_content_openai_format(sample_custom_agent): + try: + openai_response = { + "choices": [ + { + "message": { + "content": "This is the response content" + } + } + ] + } + extracted_content = sample_custom_agent._extract_content(openai_response) + assert extracted_content == "This is the response content" + logger.debug("OpenAI format content extracted correctly") + except Exception as e: + logger.error(f"Failed to test extract_content OpenAI format: {e}") + raise + + +def test_extract_content_anthropic_format(sample_custom_agent): + try: + anthropic_response = { + "content": [ + {"text": "First part "}, + {"text": "second part"} + ] + } + extracted_content = sample_custom_agent._extract_content(anthropic_response) + assert extracted_content == "First part second part" + logger.debug("Anthropic format content extracted correctly") + except Exception as e: + logger.error(f"Failed to test extract_content Anthropic format: {e}") + raise + + +def test_extract_content_generic_format(sample_custom_agent): + try: + generic_response = {"text": "Generic response text"} + extracted_content = sample_custom_agent._extract_content(generic_response) + assert extracted_content == "Generic response text" + logger.debug("Generic format content extracted correctly") + except Exception as e: + logger.error(f"Failed to test extract_content generic format: {e}") + raise + + +@patch("swarms.structs.custom_agent.httpx.Client") +def test_run_success(mock_client_class, sample_custom_agent): + try: + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = '{"choices": [{"message": {"content": "Success"}}]}' + 
mock_response.json.return_value = { + "choices": [{"message": {"content": "Success"}}] + } + mock_response.headers = {"content-type": "application/json"} + + mock_client_instance = Mock() + mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__exit__ = Mock(return_value=None) + mock_client_instance.post.return_value = mock_response + mock_client_class.return_value = mock_client_instance + + test_payload = {"message": "test"} + result = sample_custom_agent.run(test_payload) + + assert result == "Success" + logger.info("Run method executed successfully") + except Exception as e: + logger.error(f"Failed to test run success: {e}") + raise + + +@patch("swarms.structs.custom_agent.httpx.Client") +def test_run_error_response(mock_client_class, sample_custom_agent): + try: + mock_response = Mock() + mock_response.status_code = 500 + mock_response.text = "Internal Server Error" + + mock_client_instance = Mock() + mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__exit__ = Mock(return_value=None) + mock_client_instance.post.return_value = mock_response + mock_client_class.return_value = mock_client_instance + + test_payload = {"message": "test"} + result = sample_custom_agent.run(test_payload) + + assert "Error: HTTP 500" in result + logger.debug("Error response handled correctly") + except Exception as e: + logger.error(f"Failed to test run error response: {e}") + raise + + +@patch("swarms.structs.custom_agent.httpx.Client") +def test_run_request_error(mock_client_class, sample_custom_agent): + try: + import httpx + + mock_client_instance = Mock() + mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__exit__ = Mock(return_value=None) + mock_client_instance.post.side_effect = httpx.RequestError("Connection failed") + mock_client_class.return_value = mock_client_instance + + test_payload = {"message": "test"} + result = 
sample_custom_agent.run(test_payload) + + assert "Request error" in result + logger.debug("Request error handled correctly") + except Exception as e: + logger.error(f"Failed to test run request error: {e}") + raise + + +@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") +@pytest.mark.asyncio +@patch("swarms.structs.custom_agent.httpx.AsyncClient") +async def test_run_async_success(mock_async_client_class, sample_custom_agent): + try: + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = '{"content": [{"text": "Async Success"}]}' + mock_response.json.return_value = { + "content": [{"text": "Async Success"}] + } + mock_response.headers = {"content-type": "application/json"} + + mock_client_instance = AsyncMock() + mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aexit__ = AsyncMock(return_value=None) + mock_client_instance.post = AsyncMock(return_value=mock_response) + mock_async_client_class.return_value = mock_client_instance + + test_payload = {"message": "test"} + result = await sample_custom_agent.run_async(test_payload) + + assert result == "Async Success" + logger.info("Run_async method executed successfully") + except Exception as e: + logger.error(f"Failed to test run_async success: {e}") + raise + + +@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") +@pytest.mark.asyncio +@patch("swarms.structs.custom_agent.httpx.AsyncClient") +async def test_run_async_error_response(mock_async_client_class, sample_custom_agent): + try: + mock_response = Mock() + mock_response.status_code = 400 + mock_response.text = "Bad Request" + + mock_client_instance = AsyncMock() + mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aexit__ = AsyncMock(return_value=None) + mock_client_instance.post = AsyncMock(return_value=mock_response) + mock_async_client_class.return_value = 
mock_client_instance + + test_payload = {"message": "test"} + result = await sample_custom_agent.run_async(test_payload) + + assert "Error: HTTP 400" in result + logger.debug("Async error response handled correctly") + except Exception as e: + logger.error(f"Failed to test run_async error response: {e}") + raise + + +def test_agent_response_dataclass(): + try: + agent_response_instance = AgentResponse( + status_code=200, + content="Success", + headers={"content-type": "application/json"}, + json_data={"key": "value"}, + success=True, + error_message=None, + ) + assert agent_response_instance.status_code == 200 + assert agent_response_instance.content == "Success" + assert agent_response_instance.success is True + assert agent_response_instance.error_message is None + logger.debug("AgentResponse dataclass created correctly") + except Exception as e: + logger.error(f"Failed to test AgentResponse dataclass: {e}") + raise + From f9c999fb2ac9ad3a763fa2b1cf2970d3e8c2bdc7 Mon Sep 17 00:00:00 2001 From: Hugh <155223694+hughiwnl@users.noreply.github.com> Date: Mon, 17 Nov 2025 20:53:27 -0800 Subject: [PATCH 12/42] added tests for deep_discussion.py --- tests/structs/test_deep_discussion.py | 438 ++++++++++++++++++++++++++ 1 file changed, 438 insertions(+) create mode 100644 tests/structs/test_deep_discussion.py diff --git a/tests/structs/test_deep_discussion.py b/tests/structs/test_deep_discussion.py new file mode 100644 index 00000000..f83a00c5 --- /dev/null +++ b/tests/structs/test_deep_discussion.py @@ -0,0 +1,438 @@ +import pytest +from loguru import logger +from swarms.structs.deep_discussion import one_on_one_debate +from swarms.structs.agent import Agent + + +def create_function_agent(name: str, system_prompt: str = None): + if system_prompt is None: + system_prompt = f"You are {name}. Provide thoughtful responses." 
+ + agent = Agent( + agent_name=name, + agent_description=f"Test agent {name}", + system_prompt=system_prompt, + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + return agent + + +@pytest.fixture +def sample_agents(): + agent1 = create_function_agent( + "Debater1", + "You are a debater who argues for the affirmative position. Be concise and direct." + ) + agent2 = create_function_agent( + "Debater2", + "You are a debater who argues for the negative position. Be concise and direct." + ) + return [agent1, agent2] + + +@pytest.fixture +def sample_task(): + return "Should artificial intelligence be regulated?" + + +def test_one_on_one_debate_basic(sample_agents, sample_task): + try: + result = one_on_one_debate( + max_loops=2, + task=sample_task, + agents=sample_agents, + ) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("Basic one-on-one debate test passed") + except Exception as e: + logger.error(f"Failed to test basic debate: {e}") + raise + + +def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): + try: + max_loops = 3 + result = one_on_one_debate( + max_loops=max_loops, + task=sample_task, + agents=sample_agents, + ) + assert result is not None + assert isinstance(result, str) + assert len(result) > 0 + + result_list = one_on_one_debate( + max_loops=max_loops, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result_list is not None + assert isinstance(result_list, list) + assert len(result_list) == max_loops + logger.info("Multiple loops debate test passed") + except Exception as e: + logger.error(f"Failed to test multiple loops debate: {e}") + raise + + +def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): + try: + max_loops = 4 + result = one_on_one_debate( + max_loops=max_loops, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result is not None + assert isinstance(result, list) + assert 
len(result) == max_loops + + agent_names = [] + for msg in result: + if isinstance(msg, dict): + agent_names.append(msg.get("role", "")) + elif isinstance(msg, str): + if "Debater1" in msg: + agent_names.append("Debater1") + elif "Debater2" in msg: + agent_names.append("Debater2") + assert agent_names is not None + assert len(agent_names) >= 0 + if len(agent_names) > 0: + assert "Debater1" in agent_names or "Debater2" in agent_names + + if len(agent_names) > 0: + debater1_count = agent_names.count("Debater1") + debater2_count = agent_names.count("Debater2") + total_count = debater1_count + debater2_count + assert total_count > 0 + logger.info("Agent alternation test passed") + except Exception as e: + logger.error(f"Failed to test agent alternation: {e}") + raise + + +def test_one_on_one_debate_with_image(sample_agents): + try: + task = "Analyze this image and discuss its implications" + img = "test_image.jpg" + result = one_on_one_debate( + max_loops=2, + task=task, + agents=sample_agents, + img=img, + ) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("Debate with image test passed") + except Exception as e: + logger.error(f"Failed to test debate with image: {e}") + raise + + +def test_one_on_one_debate_custom_output_types(sample_agents, sample_task): + try: + output_type_checks = { + "str": str, + "str-all-except-first": str, + "dict": list, + "json": str, + "list": list, + } + for output_type, expected_type in output_type_checks.items(): + result = one_on_one_debate( + max_loops=1, + task=sample_task, + agents=sample_agents, + output_type=output_type, + ) + assert result is not None + assert isinstance(result, expected_type) + if isinstance(result, (str, list)): + assert len(result) >= 0 + logger.info("Custom output types test passed") + except Exception as e: + logger.error(f"Failed to test custom output types: {e}") + raise + + +def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): + 
try: + result = one_on_one_debate( + max_loops=2, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result is not None + assert isinstance(result, list) + assert len(result) == 2 + + for message in result: + assert message is not None + assert isinstance(message, (str, dict)) + if isinstance(message, str): + assert len(message) >= 0 + elif isinstance(message, dict): + assert "role" in message or "content" in message + logger.info("List output structure test passed") + except Exception as e: + logger.error(f"Failed to test list output structure: {e}") + raise + + +def test_one_on_one_debate_too_few_agents(sample_task): + try: + single_agent = [create_function_agent("SoloAgent")] + with pytest.raises(ValueError, match="There must be exactly two agents"): + one_on_one_debate( + max_loops=1, + task=sample_task, + agents=single_agent, + ) + logger.info("Too few agents validation test passed") + except Exception as e: + logger.error(f"Failed to test too few agents: {e}") + raise + + +def test_one_on_one_debate_too_many_agents(sample_task): + try: + many_agents = [ + create_function_agent("Agent1"), + create_function_agent("Agent2"), + create_function_agent("Agent3"), + ] + with pytest.raises(ValueError, match="There must be exactly two agents"): + one_on_one_debate( + max_loops=1, + task=sample_task, + agents=many_agents, + ) + logger.info("Too many agents validation test passed") + except Exception as e: + logger.error(f"Failed to test too many agents: {e}") + raise + + +def test_one_on_one_debate_empty_agents(sample_task): + try: + empty_agents = [] + with pytest.raises(ValueError, match="There must be exactly two agents"): + one_on_one_debate( + max_loops=1, + task=sample_task, + agents=empty_agents, + ) + logger.info("Empty agents validation test passed") + except Exception as e: + logger.error(f"Failed to test empty agents: {e}") + raise + + +def test_one_on_one_debate_none_agents(sample_task): + try: + with pytest.raises((ValueError, 
TypeError, AttributeError)): + one_on_one_debate( + max_loops=1, + task=sample_task, + agents=None, + ) + logger.info("None agents validation test passed") + except Exception as e: + logger.error(f"Failed to test None agents: {e}") + raise + + +def test_one_on_one_debate_none_task(sample_agents): + try: + result = one_on_one_debate( + max_loops=1, + task=None, + agents=sample_agents, + ) + assert result is not None + logger.info("None task test passed") + except Exception as e: + logger.error(f"Failed to test None task: {e}") + raise + + +def test_one_on_one_debate_invalid_output_type(sample_agents, sample_task): + try: + with pytest.raises((ValueError, TypeError)): + one_on_one_debate( + max_loops=1, + task=sample_task, + agents=sample_agents, + output_type="invalid_type", + ) + logger.info("Invalid output type validation test passed") + except Exception as e: + logger.error(f"Failed to test invalid output type: {e}") + raise + + +def test_one_on_one_debate_zero_loops(sample_agents, sample_task): + try: + result = one_on_one_debate( + max_loops=0, + task=sample_task, + agents=sample_agents, + ) + assert result is not None + assert isinstance(result, str) + + result_list = one_on_one_debate( + max_loops=0, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result_list is not None + assert isinstance(result_list, list) + assert len(result_list) == 0 + logger.info("Zero loops debate test passed") + except Exception as e: + logger.error(f"Failed to test zero loops: {e}") + raise + + +def test_one_on_one_debate_different_topics(sample_agents): + try: + topics = [ + "What is the meaning of life?", + "Should we colonize Mars?", + "Is technology making us more or less connected?", + ] + for topic in topics: + result = one_on_one_debate( + max_loops=2, + task=topic, + agents=sample_agents, + ) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("Different topics debate test passed") + except 
Exception as e: + logger.error(f"Failed to test different topics: {e}") + raise + + +def test_one_on_one_debate_long_conversation(sample_agents, sample_task): + try: + max_loops = 5 + result = one_on_one_debate( + max_loops=max_loops, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result is not None + assert isinstance(result, list) + assert len(result) == max_loops + logger.info("Long conversation debate test passed") + except Exception as e: + logger.error(f"Failed to test long conversation: {e}") + raise + + +def test_one_on_one_debate_different_agent_personalities(): + try: + agent1 = create_function_agent( + "Optimist", + "You are an optimist. Always see the positive side. Be concise." + ) + agent2 = create_function_agent( + "Pessimist", + "You are a pessimist. Always see the negative side. Be concise." + ) + agents = [agent1, agent2] + task = "What is the future of AI?" + result = one_on_one_debate( + max_loops=2, + task=task, + agents=agents, + output_type="list", + ) + assert result is not None + assert isinstance(result, list) + assert len(result) == 2 + + agent_names = [] + for msg in result: + if isinstance(msg, dict): + agent_names.append(msg.get("role", "")) + elif isinstance(msg, str): + if "Optimist" in msg: + agent_names.append("Optimist") + elif "Pessimist" in msg: + agent_names.append("Pessimist") + assert agent_names is not None + assert len(agent_names) >= 0 + if len(agent_names) > 0: + assert "Optimist" in agent_names or "Pessimist" in agent_names + logger.info("Different agent personalities test passed") + except Exception as e: + logger.error(f"Failed to test different personalities: {e}") + raise + + +def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, sample_task): + try: + for max_loops in [1, 2, 3, 4]: + result = one_on_one_debate( + max_loops=max_loops, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result is not None + assert isinstance(result, list) 
+ assert len(result) == max_loops + logger.info("Conversation length matches loops test passed") + except Exception as e: + logger.error(f"Failed to test conversation length: {e}") + raise + + +def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): + try: + result = one_on_one_debate( + max_loops=2, + task=sample_task, + agents=sample_agents, + output_type="list", + ) + assert result is not None + assert isinstance(result, list) + assert len(result) == 2 + + roles = [] + for msg in result: + if isinstance(msg, dict) and "role" in msg: + roles.append(msg.get("role", "")) + elif isinstance(msg, str): + if "Debater1" in msg: + roles.append("Debater1") + elif "Debater2" in msg: + roles.append("Debater2") + assert roles is not None + assert len(roles) >= 0 + if len(roles) > 0: + unique_roles = set(roles) + assert unique_roles is not None + assert len(unique_roles) >= 1 + assert "Debater1" in roles or "Debater2" in roles + logger.info("Both agents participate test passed") + except Exception as e: + logger.error(f"Failed to test both agents participate: {e}") + raise From 8c20762a3aca723020f9650909fefc228bdc0581 Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Mon, 17 Nov 2025 21:16:56 -0800 Subject: [PATCH 13/42] [REORGANIZE] SwarmType list organization --- swarms/structs/swarm_router.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 15b1500d..b078d970 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -2,7 +2,7 @@ import concurrent.futures import json import os import traceback -from typing import Any, Callable, Dict, List, Literal, Optional, Union +from typing import Any, Callable, Dict, List, Literal, Optional, Union, get_args from pydantic import BaseModel, Field @@ -46,6 +46,7 @@ SwarmType = Literal[ "CouncilAsAJudge", "InteractiveGroupChat", "HeavySwarm", + "BatchedGridWorkflow", ] @@ -273,23 
+274,7 @@ class SwarmRouter: ) # Validate swarm type is a valid string - valid_swarm_types = [ - "AgentRearrange", - "MixtureOfAgents", - "SequentialWorkflow", - "ConcurrentWorkflow", - "GroupChat", - "MultiAgentRouter", - "AutoSwarmBuilder", - "HiearchicalSwarm", - "auto", - "MajorityVoting", - "MALT", - "CouncilAsAJudge", - "InteractiveGroupChat", - "HeavySwarm", - "BatchedGridWorkflow", - ] + valid_swarm_types = get_args(SwarmType) if not isinstance(self.swarm_type, str): raise SwarmRouterConfigError( From cc5b3d8bfdc2b104d05bea835b00d2a809c80f91 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Mon, 17 Nov 2025 21:25:21 -0800 Subject: [PATCH 14/42] removed execution type: execute-swarms-router.py bc not handled --- docs/swarms/structs/auto_swarm_builder.md | 2 -- swarms/structs/auto_swarm_builder.py | 1 - 2 files changed, 3 deletions(-) diff --git a/docs/swarms/structs/auto_swarm_builder.md b/docs/swarms/structs/auto_swarm_builder.md index b06a7e95..f278991c 100644 --- a/docs/swarms/structs/auto_swarm_builder.md +++ b/docs/swarms/structs/auto_swarm_builder.md @@ -40,7 +40,6 @@ The `execution_type` parameter controls how the AutoSwarmBuilder operates: | Execution Type | Description | |----------------------------------|-----------------------------------------------------------| | **"return-agents"** | Creates and returns agent specifications as a dictionary (default) | -| **"execute-swarm-router"** | Executes the swarm router with the created agents | | **"return-swarm-router-config"** | Returns the swarm router configuration as a dictionary | | **"return-agents-objects"** | Returns agent objects created from specifications | @@ -602,7 +601,6 @@ for agent in agents: - Use `verbose=True` during development for debugging - Choose the right `execution_type` for your use case: - Use `"return-agents"` for getting agent specifications as dictionary (default) - - Use `"execute-swarm-router"` for executing the swarm router with created agents - Use 
`"return-swarm-router-config"` for analyzing swarm architecture - Use `"return-agents-objects"` for getting agent objects created from specifications - Set `max_tokens` appropriately based on expected response length diff --git a/swarms/structs/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py index 0a9bd689..514cb79c 100644 --- a/swarms/structs/auto_swarm_builder.py +++ b/swarms/structs/auto_swarm_builder.py @@ -16,7 +16,6 @@ load_dotenv() execution_types = [ "return-agents", - "execute-swarm-router", "return-swarm-router-config", "return-agents-objects", ] From 129a10a872a23105910d4bbc58752f2d2ded7ed1 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 19 Nov 2025 17:25:07 -0800 Subject: [PATCH 15/42] [FEAT][DebateWithJudge] --- docs/mkdocs.yml | 1 + docs/swarms/structs/debate_with_judge.md | 677 ++++++++++++++++++ .../multi_agent/debate_examples/README.md | 46 +- .../business_strategy_debate_example.py | 87 +++ .../debate_with_judge_example.py | 61 ++ .../debate_examples/policy_debate_example.py | 83 +++ .../technical_architecture_debate_example.py | 71 ++ .../swarm_router/swarm_router_mv.py | 2 +- .../ire_example.py | 1 - swarms/structs/__init__.py | 2 + swarms/structs/debate_with_judge.py | 346 +++++++++ 11 files changed, 1373 insertions(+), 4 deletions(-) create mode 100644 docs/swarms/structs/debate_with_judge.md create mode 100644 examples/multi_agent/debate_examples/business_strategy_debate_example.py create mode 100644 examples/multi_agent/debate_examples/debate_with_judge_example.py create mode 100644 examples/multi_agent/debate_examples/policy_debate_example.py create mode 100644 examples/multi_agent/debate_examples/technical_architecture_debate_example.py rename swarm_router_mv.py => examples/multi_agent/swarm_router/swarm_router_mv.py (98%) create mode 100644 swarms/structs/debate_with_judge.py diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index f5f2c81c..1619374f 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -271,6 +271,7 @@ nav: 
- Overview: "swarms/structs/overview.md" - Custom Multi Agent Architectures: "swarms/structs/custom_swarm.md" - Debate Multi-Agent Architectures: "swarms/structs/orchestration_methods.md" + - DebateWithJudge: "swarms/structs/debate_with_judge.md" - MajorityVoting: "swarms/structs/majorityvoting.md" - RoundRobin: "swarms/structs/round_robin_swarm.md" - Mixture of Agents: "swarms/structs/moa.md" diff --git a/docs/swarms/structs/debate_with_judge.md b/docs/swarms/structs/debate_with_judge.md new file mode 100644 index 00000000..89341f77 --- /dev/null +++ b/docs/swarms/structs/debate_with_judge.md @@ -0,0 +1,677 @@ +# DebateWithJudge Module Documentation + +The `DebateWithJudge` module provides a sophisticated debate architecture with self-refinement through a judge agent. This system enables two agents (Pro and Con) to debate a topic, with a Judge agent evaluating their arguments and providing refined synthesis. The process repeats for N rounds to progressively refine the answer. + +## Architecture + +```mermaid +graph TD + A[DebateWithJudge System] --> B[Initialize Pro, Con, and Judge Agents] + B --> C[Start with Initial Topic] + C --> D[Round Loop: max_rounds] + D --> E[Pro Agent Presents Argument] + E --> F[Con Agent Presents Counter-Argument] + F --> G[Judge Agent Evaluates Both] + G --> H[Judge Provides Synthesis] + H --> I{More Rounds?} + I -->|Yes| D + I -->|No| J[Format Final Output] + J --> K[Return Result] +``` + +### Key Concepts + +| Concept | Description | +|--------------------------|----------------------------------------------------------------------------------------------| +| Debate Architecture | A structured process where two agents present opposing arguments on a topic | +| Pro Agent | The agent arguing in favor of a position | +| Con Agent | The agent arguing against a position | +| Judge Agent | An impartial evaluator that analyzes both arguments and provides synthesis | +| Iterative Refinement | The process repeats for multiple rounds, each 
round building upon the judge's previous synthesis | +| Progressive Improvement | Each round refines the answer by incorporating feedback and addressing weaknesses | + +## Class Definition: `DebateWithJudge` + +```python +class DebateWithJudge: + def __init__( + self, + pro_agent: Agent, + con_agent: Agent, + judge_agent: Agent, + max_rounds: int = 3, + output_type: str = "str-all-except-first", + verbose: bool = True, + ): +``` + +### Constructor Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `pro_agent` | `Agent` | Required | The agent arguing in favor (Pro position) | +| `con_agent` | `Agent` | Required | The agent arguing against (Con position) | +| `judge_agent` | `Agent` | Required | The judge agent that evaluates arguments and provides synthesis | +| `max_rounds` | `int` | `3` | Maximum number of debate rounds to execute | +| `output_type` | `str` | `"str-all-except-first"` | Format for the output conversation history | +| `verbose` | `bool` | `True` | Whether to enable verbose logging | + +## API Reference + +### Core Methods + +#### `run(task: str) -> Union[str, List, dict]` + +Executes the debate with judge refinement process for a single task and returns the refined result. + +**Signature:** + +```python +def run(self, task: str) -> Union[str, List, dict] +``` + +**Parameters:** + +- `task` (`str`): The initial topic or question to debate + +**Returns:** + +- `Union[str, List, dict]`: The formatted conversation history or final refined answer, depending on `output_type` + +**Process Flow:** + +1. **Task Validation**: Validates that the task is a non-empty string +2. **Agent Initialization**: Initializes all three agents with their respective roles and the initial task context +3. 
**Multi-Round Execution**: For each round (up to `max_rounds`): + - **Pro Argument**: Pro agent presents an argument in favor of the current topic + + - **Con Counter-Argument**: Con agent presents a counter-argument, addressing the Pro's points + + - **Judge Evaluation**: Judge agent evaluates both arguments, identifies strengths and weaknesses + + - **Synthesis Generation**: Judge provides a refined synthesis that incorporates the best elements from both sides + + - **Topic Refinement**: Judge's synthesis becomes the topic for the next round +4. **Result Formatting**: Returns the final result formatted according to `output_type` + +**Example:** + +```python +from swarms import Agent, DebateWithJudge + +# Create the Pro agent (arguing in favor) +pro_agent = Agent( + agent_name="Pro-Agent", + system_prompt=( + "You are a skilled debater who argues in favor of positions. " + "You present well-reasoned arguments with evidence, examples, " + "and logical reasoning. You are persuasive and articulate." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Con agent (arguing against) +con_agent = Agent( + agent_name="Con-Agent", + system_prompt=( + "You are a skilled debater who argues against positions. " + "You present strong counter-arguments with evidence, examples, " + "and logical reasoning. You identify weaknesses in opposing " + "arguments and provide compelling alternatives." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Judge agent (evaluates and synthesizes) +judge_agent = Agent( + agent_name="Judge-Agent", + system_prompt=( + "You are an impartial judge who evaluates debates. " + "You carefully analyze arguments from both sides, identify " + "strengths and weaknesses, and provide balanced synthesis. " + "You may declare a winner or provide a refined answer that " + "incorporates the best elements from both arguments." 
+ ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the DebateWithJudge system +debate_system = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=3, + output_type="str-all-except-first", + verbose=True, +) + +# Define the debate topic +topic = ( + "Should artificial intelligence be regulated by governments? " + "Discuss the balance between innovation and safety." +) + +# Run the debate +result = debate_system.run(task=topic) +print(result) +``` + +**Raises:** + +- `ValueError`: If task is None or empty, or if any required agents are None, or if max_rounds is less than 1 + +--- + +#### `batched_run(tasks: List[str]) -> List[str]` + +Executes the debate with judge refinement process for multiple tasks sequentially, processing each task through the complete multi-round debate process. + +**Signature:** + +```python +def batched_run(self, tasks: List[str]) -> List[str] +``` + +**Parameters:** + +- `tasks` (`List[str]`): List of topics or questions to debate + +**Returns:** + +- `List[str]`: List of final refined answers, one for each input task + +**Process Flow:** + +1. **Sequential Processing**: Processes each task in the input list one by one +2. **Independent Execution**: Each task runs through the complete multi-round debate process independently +3. **Result Collection**: Collects and returns all results in the same order as input tasks + +**Example:** + +```python +# Define multiple debate topics +debate_topics = [ + "Should remote work become the standard for knowledge workers?", + "Is cryptocurrency a viable alternative to traditional banking?", + "Should social media platforms be held accountable for content moderation?", + "Are electric vehicles the future of transportation?" 
+] + +# Execute batch processing +results = debate_system.batched_run(debate_topics) + +# Process results +for topic, result in zip(debate_topics, results): + print(result) +``` + +**Performance Considerations:** + +| Consideration | Description | +|---------------------------------------------|---------------------------------------------------------------------------| +| Sequential Processing | Tasks are processed one after another, not in parallel | +| Independent Conversation History | Each task maintains its own conversation history | +| Memory Usage | Scales with the number of tasks and the length of each conversation | +| Total Execution Time | Equals the sum of all individual task execution times | + +--- + +### Helper Methods + +#### `get_conversation_history() -> List[dict]` + +Get the full conversation history from the debate. + +**Signature:** + +```python +def get_conversation_history(self) -> List[dict] +``` + +**Returns:** + +- `List[dict]`: List of message dictionaries containing the conversation history + +**Example:** + +```python +# Run a debate +result = debate_system.run("Should AI be regulated?") + +# Get the full conversation history +history = debate_system.get_conversation_history() +print(history) +``` + +--- + +#### `get_final_answer() -> str` + +Get the final refined answer from the judge. 
+ +**Signature:** + +```python +def get_final_answer(self) -> str +``` + +**Returns:** + +- `str`: The content of the final judge synthesis + +**Example:** + +```python +# Run a debate +result = debate_system.run("Should AI be regulated?") + +# Get just the final answer +final_answer = debate_system.get_final_answer() +print(final_answer) +``` + +--- + +### Properties + +| Property | Type | Description | +|----------|------|-------------| +| `pro_agent` | `Agent` | The agent arguing in favor (Pro position) | +| `con_agent` | `Agent` | The agent arguing against (Con position) | +| `judge_agent` | `Agent` | The judge agent that evaluates arguments | +| `max_rounds` | `int` | Maximum number of debate rounds | +| `output_type` | `str` | Format for returned results | +| `verbose` | `bool` | Whether verbose logging is enabled | +| `conversation` | `Conversation` | Conversation history management object | + +## Output Types + +The `output_type` parameter controls how the conversation history is formatted: + +| `output_type` Value | Description | +|----------------------------|--------------------------------------------------------------| +| `"str-all-except-first"` | Returns a formatted string with all messages except the first (default) | +| `"str"` | Returns all messages as a formatted string | +| `"dict"` | Returns messages as a dictionary | +| `"list"` | Returns messages as a list | + + +## Usage Patterns + +### Single Topic Debate + +For focused debate and refinement on a single complex topic: + +```python +# Simple single topic execution +result = debate_system.run("Should universal basic income be implemented?") + +# With custom output format +debate_system.output_type = "dict" +result = debate_system.run("Should universal basic income be implemented?") +``` + +### Batch Processing + +For processing multiple related topics sequentially: + +```python +# Process multiple policy questions +policy_topics = [ + "Should healthcare be universal?", + "Should education be 
free?", + "Should carbon emissions be taxed?" +] +results = debate_system.batched_run(policy_topics) +``` + +### Custom Agent Configuration + +For specialized debate scenarios with custom agent prompts: + +```python +# Create specialized agents for technical debates +technical_pro = Agent( + agent_name="Technical-Pro", + system_prompt="You are a software engineering expert arguing for technical solutions...", + model_name="gpt-4", + max_loops=1, +) + +technical_con = Agent( + agent_name="Technical-Con", + system_prompt="You are a software engineering expert arguing against technical solutions...", + model_name="gpt-4", + max_loops=1, +) + +technical_judge = Agent( + agent_name="Technical-Judge", + system_prompt="You are a senior software architect evaluating technical arguments...", + model_name="gpt-4", + max_loops=1, +) + +technical_debate = DebateWithJudge( + pro_agent=technical_pro, + con_agent=technical_con, + judge_agent=technical_judge, + max_rounds=5, # More rounds for complex technical topics + verbose=True, +) +``` + +## Usage Examples + +### Example 1: Policy Debate on AI Regulation + +This example demonstrates using `DebateWithJudge` for a comprehensive policy debate on AI regulation, with multiple rounds of refinement. + +```python +from swarms import Agent, DebateWithJudge + +# Create the Pro agent (arguing in favor of AI regulation) +pro_agent = Agent( + agent_name="Pro-Regulation-Agent", + system_prompt=( + "You are a policy expert specializing in technology regulation. " + "You argue in favor of government regulation of artificial intelligence. " + "You present well-reasoned arguments focusing on safety, ethics, " + "and public interest. You use evidence, examples, and logical reasoning. " + "You are persuasive and articulate, emphasizing the need for oversight " + "to prevent harm and ensure responsible AI development." 
+ ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Con agent (arguing against AI regulation) +con_agent = Agent( + agent_name="Anti-Regulation-Agent", + system_prompt=( + "You are a technology policy expert specializing in innovation. " + "You argue against heavy government regulation of artificial intelligence. " + "You present strong counter-arguments focusing on innovation, economic growth, " + "and the risks of over-regulation. You identify weaknesses in regulatory " + "proposals and provide compelling alternatives such as industry self-regulation " + "and ethical guidelines. You emphasize the importance of maintaining " + "technological competitiveness." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Judge agent (evaluates and synthesizes) +judge_agent = Agent( + agent_name="Policy-Judge-Agent", + system_prompt=( + "You are an impartial policy analyst and judge who evaluates debates on " + "technology policy. You carefully analyze arguments from both sides, " + "identify strengths and weaknesses, and provide balanced synthesis. " + "You consider multiple perspectives including safety, innovation, economic impact, " + "and ethical considerations. You may declare a winner or provide a refined " + "answer that incorporates the best elements from both arguments, such as " + "balanced regulatory frameworks that protect public interest while fostering innovation." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the DebateWithJudge system +debate_system = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=3, + output_type="str-all-except-first", + verbose=True, +) + +# Define the debate topic +topic = ( + "Should artificial intelligence be regulated by governments? 
" + "Discuss the balance between innovation and safety, considering " + "both the potential benefits of regulation (safety, ethics, public trust) " + "and the potential drawbacks (stifling innovation, economic impact, " + "regulatory capture). Provide a nuanced analysis." +) + +# Run the debate +result = debate_system.run(task=topic) +print(result) + +# Get the final refined answer +final_answer = debate_system.get_final_answer() +print(final_answer) +``` + +### Example 2: Technical Architecture Debate with Batch Processing + +This example demonstrates using `batched_run` to process multiple technical architecture questions, comparing different approaches to system design. + +```python +from swarms import Agent, DebateWithJudge + +# Create specialized technical agents +pro_agent = Agent( + agent_name="Microservices-Pro", + system_prompt=( + "You are a software architecture expert advocating for microservices architecture. " + "You present arguments focusing on scalability, independent deployment, " + "technology diversity, and team autonomy. You use real-world examples and " + "case studies to support your position." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +con_agent = Agent( + agent_name="Monolith-Pro", + system_prompt=( + "You are a software architecture expert advocating for monolithic architecture. " + "You present counter-arguments focusing on simplicity, reduced complexity, " + "easier debugging, and lower operational overhead. You identify weaknesses " + "in microservices approaches and provide compelling alternatives." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +judge_agent = Agent( + agent_name="Architecture-Judge", + system_prompt=( + "You are a senior software architect evaluating architecture debates. " + "You analyze both arguments considering factors like team size, project scale, " + "complexity, operational capabilities, and long-term maintainability. 
" + "You provide balanced synthesis that considers context-specific trade-offs." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the debate system +architecture_debate = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=2, # Fewer rounds for more focused technical debates + output_type="str-all-except-first", + verbose=True, +) + +# Define multiple architecture questions +architecture_questions = [ + "Should a startup with 5 developers use microservices or monolithic architecture?", + "Is serverless architecture better than containerized deployments for event-driven systems?", + "Should a financial application use SQL or NoSQL databases for transaction processing?", + "Is event-driven architecture superior to request-response for real-time systems?", +] + +# Execute batch processing +results = architecture_debate.batched_run(architecture_questions) + +# Display results +for result in results: + print(result) +``` + +### Example 3: Business Strategy Debate with Custom Configuration + +This example demonstrates a business strategy debate with custom agent configurations, multiple rounds, and accessing conversation history. + +```python +from swarms import Agent, DebateWithJudge + +# Create business strategy agents with detailed expertise +pro_agent = Agent( + agent_name="Growth-Strategy-Pro", + system_prompt=( + "You are a business strategy consultant specializing in aggressive growth strategies. " + "You argue in favor of rapid expansion, market penetration, and scaling. " + "You present arguments focusing on first-mover advantages, market share capture, " + "network effects, and competitive positioning. You use case studies from " + "successful companies like Amazon, Uber, and Airbnb to support your position." 
+ ), + model_name="gpt-4o-mini", + max_loops=1, +) + +con_agent = Agent( + agent_name="Sustainable-Growth-Pro", + system_prompt=( + "You are a business strategy consultant specializing in sustainable, profitable growth. " + "You argue against aggressive expansion in favor of measured, sustainable growth. " + "You present counter-arguments focusing on profitability, unit economics, " + "sustainable competitive advantages, and avoiding overextension. You identify " + "weaknesses in 'growth at all costs' approaches and provide compelling alternatives " + "based on companies like Apple, Microsoft, and Berkshire Hathaway." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +judge_agent = Agent( + agent_name="Strategy-Judge", + system_prompt=( + "You are a seasoned business strategist and former CEO evaluating growth strategy debates. " + "You carefully analyze arguments from both sides, considering factors like: " + "- Market conditions and competitive landscape\n" + "- Company resources and capabilities\n" + "- Risk tolerance and financial position\n" + "- Long-term sustainability vs. short-term growth\n" + "- Industry-specific dynamics\n\n" + "You provide balanced synthesis that incorporates the best elements from both arguments, " + "considering context-specific factors. You may recommend a hybrid approach when appropriate." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the debate system with extended rounds for complex strategy discussions +strategy_debate = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=4, # More rounds for complex strategic discussions + output_type="dict", # Use dict format for structured analysis + verbose=True, +) + +# Define a complex business strategy question +strategy_question = ( + "A SaaS startup with $2M ARR, 40% gross margins, and $500K in the bank " + "is considering two paths:\n" + "1. 
Aggressive growth: Raise $10M, hire 50 people, expand to 5 new markets\n" + "2. Sustainable growth: Focus on profitability, improve unit economics, " + "expand gradually with existing resources\n\n" + "Which strategy should they pursue? Consider market conditions, competitive " + "landscape, and long-term viability." +) + +# Run the debate +result = strategy_debate.run(task=strategy_question) +print(result) + +# Get the full conversation history for detailed analysis +history = strategy_debate.get_conversation_history() +print(history) + +# Get the final refined answer +final_answer = strategy_debate.get_final_answer() +print(final_answer) +``` + +## Best Practices + +### Agent Configuration + +!!! tip "Agent Configuration Best Practices" + - **Pro Agent**: Should be configured with expertise in the topic area and strong argumentation skills + - **Con Agent**: Should be configured to identify weaknesses and provide compelling alternatives + - **Judge Agent**: Should be configured with broad expertise and impartial evaluation capabilities + - Use appropriate models for the complexity of the debate topic + - Consider using more powerful models for the Judge agent + +### Round Configuration + +!!! note "Round Configuration Tips" + - Use 2-3 rounds for most topics + - Use 4-5 rounds for complex, multi-faceted topics + - More rounds allow for deeper refinement but increase execution time + - Consider the trade-off between refinement quality and cost + +### Output Format Selection + +!!! info "Output Format Guidelines" + - Use `"str-all-except-first"` for readable summaries (default) + - Use `"dict"` for structured analysis and programmatic processing + - Use `"list"` for detailed conversation inspection + - Use `"str"` for complete conversation history as text + +### Performance Optimization + +!!! 
warning "Performance Considerations" + - Batch processing is sequential - consider parallel execution for large batches + - Each round requires 3 agent calls (Pro, Con, Judge) + - Memory usage scales with conversation history length + - Consider using lighter models for faster execution when appropriate + +## Troubleshooting + +### Common Issues + +!!! danger "Common Problems" + **Issue**: Agents not following their roles + + **Solution**: Ensure system prompts clearly define each agent's role and expertise + + --- + + **Issue**: Judge synthesis not improving over rounds + + **Solution**: Increase `max_rounds` or improve Judge agent's system prompt to emphasize refinement + + --- + + **Issue**: Debate results are too generic + + **Solution**: Use more specific system prompts and provide detailed context in the task + + --- + + **Issue**: Execution time is too long + + **Solution**: Reduce `max_rounds`, use faster models, or process fewer topics in batch + +## Contributing + +!!! success "Contributing" + Contributions are welcome! Please feel free to submit a Pull Request. + +## License + +!!! info "License" + This project is licensed under the MIT License - see the LICENSE file for details. diff --git a/examples/multi_agent/debate_examples/README.md b/examples/multi_agent/debate_examples/README.md index 80a0aab7..83241c27 100644 --- a/examples/multi_agent/debate_examples/README.md +++ b/examples/multi_agent/debate_examples/README.md @@ -6,7 +6,49 @@ This directory contains examples demonstrating debate patterns for multi-agent s Debate patterns enable agents to engage in structured discussions, present arguments, and reach conclusions through discourse. This pattern is useful for exploring multiple perspectives on complex topics and arriving at well-reasoned decisions. -## Note +## Examples -This directory is currently being populated with debate examples. Check back soon for implementations! 
+### DebateWithJudge + +The `DebateWithJudge` architecture implements a debate system with self-refinement: + +- **Agent A (Pro)** and **Agent B (Con)** present opposing arguments +- Both arguments are evaluated by a **Judge/Critic Agent** +- The Judge provides a winner or synthesis β†’ refined answer +- The process repeats for N rounds to progressively improve the answer + +**Architecture Flow:** +``` +Agent A (Pro) ↔ Agent B (Con) + β”‚ β”‚ + β–Ό β–Ό + Judge / Critic Agent + β”‚ + β–Ό +Winner or synthesis β†’ refined answer +``` + +**Example Usage:** +```python +from swarms import Agent +from swarms.structs.debate_with_judge import DebateWithJudge + +# Create Pro, Con, and Judge agents +pro_agent = Agent(agent_name="Pro-Agent", ...) +con_agent = Agent(agent_name="Con-Agent", ...) +judge_agent = Agent(agent_name="Judge-Agent", ...) + +# Create debate system +debate = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=3 +) + +# Run debate +result = debate.run("Should AI be regulated?") +``` + +See [debate_with_judge_example.py](./debate_with_judge_example.py) for a complete example. diff --git a/examples/multi_agent/debate_examples/business_strategy_debate_example.py b/examples/multi_agent/debate_examples/business_strategy_debate_example.py new file mode 100644 index 00000000..66ee8b62 --- /dev/null +++ b/examples/multi_agent/debate_examples/business_strategy_debate_example.py @@ -0,0 +1,87 @@ +""" +Example 3: Business Strategy Debate with Custom Configuration + +This example demonstrates a business strategy debate with custom agent configurations, +multiple rounds, and accessing conversation history. +""" + +from swarms import Agent, DebateWithJudge + +# Create business strategy agents with detailed expertise +pro_agent = Agent( + agent_name="Growth-Strategy-Pro", + system_prompt=( + "You are a business strategy consultant specializing in aggressive growth strategies. 
" + "You argue in favor of rapid expansion, market penetration, and scaling. " + "You present arguments focusing on first-mover advantages, market share capture, " + "network effects, and competitive positioning. You use case studies from " + "successful companies like Amazon, Uber, and Airbnb to support your position." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +con_agent = Agent( + agent_name="Sustainable-Growth-Pro", + system_prompt=( + "You are a business strategy consultant specializing in sustainable, profitable growth. " + "You argue against aggressive expansion in favor of measured, sustainable growth. " + "You present counter-arguments focusing on profitability, unit economics, " + "sustainable competitive advantages, and avoiding overextension. You identify " + "weaknesses in 'growth at all costs' approaches and provide compelling alternatives " + "based on companies like Apple, Microsoft, and Berkshire Hathaway." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +judge_agent = Agent( + agent_name="Strategy-Judge", + system_prompt=( + "You are a seasoned business strategist and former CEO evaluating growth strategy debates. " + "You carefully analyze arguments from both sides, considering factors like: " + "- Market conditions and competitive landscape\n" + "- Company resources and capabilities\n" + "- Risk tolerance and financial position\n" + "- Long-term sustainability vs. short-term growth\n" + "- Industry-specific dynamics\n\n" + "You provide balanced synthesis that incorporates the best elements from both arguments, " + "considering context-specific factors. You may recommend a hybrid approach when appropriate." 
+ ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the debate system with extended rounds for complex strategy discussions +strategy_debate = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=4, # More rounds for complex strategic discussions + output_type="dict", # Use dict format for structured analysis + verbose=True, +) + +# Define a complex business strategy question +strategy_question = ( + "A SaaS startup with $2M ARR, 40% gross margins, and $500K in the bank " + "is considering two paths:\n" + "1. Aggressive growth: Raise $10M, hire 50 people, expand to 5 new markets\n" + "2. Sustainable growth: Focus on profitability, improve unit economics, " + "expand gradually with existing resources\n\n" + "Which strategy should they pursue? Consider market conditions, competitive " + "landscape, and long-term viability." +) + +# Run the debate +result = strategy_debate.run(task=strategy_question) +print(result) + +# Get the full conversation history for detailed analysis +history = strategy_debate.get_conversation_history() +print(history) + +# Get the final refined answer +final_answer = strategy_debate.get_final_answer() +print(final_answer) + diff --git a/examples/multi_agent/debate_examples/debate_with_judge_example.py b/examples/multi_agent/debate_examples/debate_with_judge_example.py new file mode 100644 index 00000000..663f88e9 --- /dev/null +++ b/examples/multi_agent/debate_examples/debate_with_judge_example.py @@ -0,0 +1,61 @@ +from swarms import Agent, DebateWithJudge + +# Create the Pro agent (arguing in favor) +pro_agent = Agent( + agent_name="Pro-Agent", + system_prompt=( + "You are a skilled debater who argues in favor of positions. " + "You present well-reasoned arguments with evidence, examples, " + "and logical reasoning. You are persuasive and articulate." 
+ ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Con agent (arguing against) +con_agent = Agent( + agent_name="Con-Agent", + system_prompt=( + "You are a skilled debater who argues against positions. " + "You present strong counter-arguments with evidence, examples, " + "and logical reasoning. You identify weaknesses in opposing " + "arguments and provide compelling alternatives." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Judge agent (evaluates and synthesizes) +judge_agent = Agent( + agent_name="Judge-Agent", + system_prompt=( + "You are an impartial judge who evaluates debates. " + "You carefully analyze arguments from both sides, identify " + "strengths and weaknesses, and provide balanced synthesis. " + "You may declare a winner or provide a refined answer that " + "incorporates the best elements from both arguments." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the DebateWithJudge system +debate_system = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=3, # Run 3 rounds of debate and refinement + output_type="str-all-except-first", # Return as formatted string + verbose=True, # Enable verbose logging +) + +# Define the debate topic +topic = ( + "Should artificial intelligence be regulated by governments? " + "Discuss the balance between innovation and safety." +) + +# Run the debate +result = debate_system.run(task=topic) + +print(result) diff --git a/examples/multi_agent/debate_examples/policy_debate_example.py b/examples/multi_agent/debate_examples/policy_debate_example.py new file mode 100644 index 00000000..e8e744c5 --- /dev/null +++ b/examples/multi_agent/debate_examples/policy_debate_example.py @@ -0,0 +1,83 @@ +""" +Example 1: Policy Debate on AI Regulation + +This example demonstrates using DebateWithJudge for a comprehensive policy debate +on AI regulation, with multiple rounds of refinement. 
+""" + +from swarms import Agent, DebateWithJudge + +# Create the Pro agent (arguing in favor of AI regulation) +pro_agent = Agent( + agent_name="Pro-Regulation-Agent", + system_prompt=( + "You are a policy expert specializing in technology regulation. " + "You argue in favor of government regulation of artificial intelligence. " + "You present well-reasoned arguments focusing on safety, ethics, " + "and public interest. You use evidence, examples, and logical reasoning. " + "You are persuasive and articulate, emphasizing the need for oversight " + "to prevent harm and ensure responsible AI development." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Con agent (arguing against AI regulation) +con_agent = Agent( + agent_name="Anti-Regulation-Agent", + system_prompt=( + "You are a technology policy expert specializing in innovation. " + "You argue against heavy government regulation of artificial intelligence. " + "You present strong counter-arguments focusing on innovation, economic growth, " + "and the risks of over-regulation. You identify weaknesses in regulatory " + "proposals and provide compelling alternatives such as industry self-regulation " + "and ethical guidelines. You emphasize the importance of maintaining " + "technological competitiveness." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the Judge agent (evaluates and synthesizes) +judge_agent = Agent( + agent_name="Policy-Judge-Agent", + system_prompt=( + "You are an impartial policy analyst and judge who evaluates debates on " + "technology policy. You carefully analyze arguments from both sides, " + "identify strengths and weaknesses, and provide balanced synthesis. " + "You consider multiple perspectives including safety, innovation, economic impact, " + "and ethical considerations. 
You may declare a winner or provide a refined " + "answer that incorporates the best elements from both arguments, such as " + "balanced regulatory frameworks that protect public interest while fostering innovation." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the DebateWithJudge system +debate_system = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=3, + output_type="str-all-except-first", + verbose=True, +) + +# Define the debate topic +topic = ( + "Should artificial intelligence be regulated by governments? " + "Discuss the balance between innovation and safety, considering " + "both the potential benefits of regulation (safety, ethics, public trust) " + "and the potential drawbacks (stifling innovation, economic impact, " + "regulatory capture). Provide a nuanced analysis." +) + +# Run the debate +result = debate_system.run(task=topic) +print(result) + +# Get the final refined answer +final_answer = debate_system.get_final_answer() +print(final_answer) + diff --git a/examples/multi_agent/debate_examples/technical_architecture_debate_example.py b/examples/multi_agent/debate_examples/technical_architecture_debate_example.py new file mode 100644 index 00000000..e59bfa53 --- /dev/null +++ b/examples/multi_agent/debate_examples/technical_architecture_debate_example.py @@ -0,0 +1,71 @@ +""" +Example 2: Technical Architecture Debate with Batch Processing + +This example demonstrates using batched_run to process multiple technical +architecture questions, comparing different approaches to system design. +""" + +from swarms import Agent, DebateWithJudge + +# Create specialized technical agents +pro_agent = Agent( + agent_name="Microservices-Pro", + system_prompt=( + "You are a software architecture expert advocating for microservices architecture. " + "You present arguments focusing on scalability, independent deployment, " + "technology diversity, and team autonomy. 
You use real-world examples and " + "case studies to support your position." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +con_agent = Agent( + agent_name="Monolith-Pro", + system_prompt=( + "You are a software architecture expert advocating for monolithic architecture. " + "You present counter-arguments focusing on simplicity, reduced complexity, " + "easier debugging, and lower operational overhead. You identify weaknesses " + "in microservices approaches and provide compelling alternatives." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +judge_agent = Agent( + agent_name="Architecture-Judge", + system_prompt=( + "You are a senior software architect evaluating architecture debates. " + "You analyze both arguments considering factors like team size, project scale, " + "complexity, operational capabilities, and long-term maintainability. " + "You provide balanced synthesis that considers context-specific trade-offs." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the debate system +architecture_debate = DebateWithJudge( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + max_rounds=2, # Fewer rounds for more focused technical debates + output_type="str-all-except-first", + verbose=True, +) + +# Define multiple architecture questions +architecture_questions = [ + "Should a startup with 5 developers use microservices or monolithic architecture?", + "Is serverless architecture better than containerized deployments for event-driven systems?", + "Should a financial application use SQL or NoSQL databases for transaction processing?", + "Is event-driven architecture superior to request-response for real-time systems?", +] + +# Execute batch processing +results = architecture_debate.batched_run(architecture_questions) + +# Display results +for result in results: + print(result) + diff --git a/swarm_router_mv.py b/examples/multi_agent/swarm_router/swarm_router_mv.py similarity index 98% rename from swarm_router_mv.py 
rename to examples/multi_agent/swarm_router/swarm_router_mv.py index dd691b78..f1511486 100644 --- a/swarm_router_mv.py +++ b/examples/multi_agent/swarm_router/swarm_router_mv.py @@ -37,4 +37,4 @@ router = SwarmRouter( result = router.run( "Conduct a research analysis on water stocks and etfs" ) -print(result) \ No newline at end of file +print(result) diff --git a/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py b/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py index b1f3bc00..c9f990cb 100644 --- a/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py +++ b/examples/reasoning_agents/reasoning_agent_router_examples/ire_example.py @@ -8,4 +8,3 @@ router = ReasoningAgentRouter( result = router.run("Explain photosynthesis in one sentence.") print(result) - diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index 8b822142..e0d3430a 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -11,6 +11,7 @@ from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.conversation import Conversation from swarms.structs.council_as_judge import CouncilAsAJudge from swarms.structs.cron_job import CronJob +from swarms.structs.debate_with_judge import DebateWithJudge from swarms.structs.graph_workflow import ( Edge, GraphWorkflow, @@ -186,4 +187,5 @@ __all__ = [ "BatchedGridWorkflow", "AOP", "SelfMoASeq", + "DebateWithJudge", ] diff --git a/swarms/structs/debate_with_judge.py b/swarms/structs/debate_with_judge.py new file mode 100644 index 00000000..c77372e0 --- /dev/null +++ b/swarms/structs/debate_with_judge.py @@ -0,0 +1,346 @@ +""" +Debate/Self-Refinement with Judge Architecture + +This module implements a debate architecture where two agents (Pro and Con) +debate a topic, and a Judge agent evaluates their arguments and provides +refined synthesis. The process repeats for N rounds to progressively refine +the answer. 
+""" + +from typing import List, Union + +from loguru import logger + +from swarms.structs.agent import Agent +from swarms.structs.conversation import Conversation +from swarms.utils.history_output_formatter import ( + history_output_formatter, +) + + +class DebateWithJudge: + """ + A debate architecture with self-refinement through a judge agent. + + This class implements a debate system where: + 1. Agent A (Pro) and Agent B (Con) present opposing arguments + 2. Both arguments are evaluated by a Judge/Critic Agent + 3. The Judge provides a winner or synthesis β†’ refined answer + 4. The process repeats for N rounds to progressively improve the answer + + Architecture: + Agent A (Pro) ↔ Agent B (Con) + β”‚ β”‚ + β–Ό β–Ό + Judge / Critic Agent + β”‚ + β–Ό + Winner or synthesis β†’ refined answer + + Attributes: + pro_agent (Agent): The agent arguing in favor (Pro position). + con_agent (Agent): The agent arguing against (Con position). + judge_agent (Agent): The judge agent that evaluates arguments and provides synthesis. + max_rounds (int): Maximum number of debate rounds to execute. + output_type (str): Format for the output conversation history. + verbose (bool): Whether to enable verbose logging. + """ + + def __init__( + self, + pro_agent: Agent, + con_agent: Agent, + judge_agent: Agent, + max_rounds: int = 3, + output_type: str = "str-all-except-first", + verbose: bool = True, + ): + """ + Initialize the DebateWithJudge architecture. + + Args: + pro_agent (Agent): The agent arguing in favor (Pro position). + con_agent (Agent): The agent arguing against (Con position). + judge_agent (Agent): The judge agent that evaluates arguments and provides synthesis. + max_rounds (int): Maximum number of debate rounds to execute. Defaults to 3. + output_type (str): Format for the output conversation history. Defaults to "str-all-except-first". + verbose (bool): Whether to enable verbose logging. Defaults to True. 
+ + Raises: + ValueError: If any of the required agents are None or if max_rounds is less than 1. + """ + if pro_agent is None: + raise ValueError("pro_agent cannot be None") + if con_agent is None: + raise ValueError("con_agent cannot be None") + if judge_agent is None: + raise ValueError("judge_agent cannot be None") + if max_rounds < 1: + raise ValueError("max_rounds must be at least 1") + + self.pro_agent = pro_agent + self.con_agent = con_agent + self.judge_agent = judge_agent + self.max_rounds = max_rounds + self.output_type = output_type + self.verbose = verbose + + # Initialize conversation history + self.conversation = Conversation() + + if self.verbose: + logger.info( + f"DebateWithJudge initialized with {max_rounds} rounds" + ) + + def run(self, task: str) -> Union[str, List, dict]: + """ + Execute the debate with judge refinement process. + + Args: + task (str): The initial topic or question to debate. + + Returns: + Union[str, List, dict]: The formatted conversation history or final refined answer, + depending on output_type. + + Raises: + ValueError: If task is None or empty. 
+ """ + if not task or not isinstance(task, str): + raise ValueError("Task must be a non-empty string") + + # Initialize agents with their roles + self._initialize_agents(task) + + # Start with the initial task + current_topic = task + + if self.verbose: + logger.info(f"Starting debate on: {task}") + + # Execute N rounds of debate and refinement + for round_num in range(self.max_rounds): + if self.verbose: + logger.info( + f"Round {round_num + 1}/{self.max_rounds}" + ) + + # Step 1: Pro agent presents argument + pro_prompt = self._create_pro_prompt( + current_topic, round_num + ) + pro_argument = self.pro_agent.run(task=pro_prompt) + self.conversation.add( + self.pro_agent.agent_name, pro_argument + ) + + if self.verbose: + logger.debug(f"Pro argument: {pro_argument[:100]}...") + + # Step 2: Con agent presents counter-argument + con_prompt = self._create_con_prompt( + current_topic, pro_argument, round_num + ) + con_argument = self.con_agent.run(task=con_prompt) + self.conversation.add( + self.con_agent.agent_name, con_argument + ) + + if self.verbose: + logger.debug(f"Con argument: {con_argument[:100]}...") + + # Step 3: Judge evaluates both arguments and provides synthesis + judge_prompt = self._create_judge_prompt( + current_topic, pro_argument, con_argument, round_num + ) + judge_synthesis = self.judge_agent.run(task=judge_prompt) + self.conversation.add( + self.judge_agent.agent_name, judge_synthesis + ) + + if self.verbose: + logger.debug( + f"Judge synthesis: {judge_synthesis[:100]}..." + ) + + # Use judge's synthesis as input for next round + current_topic = judge_synthesis + + # Return formatted output + return history_output_formatter( + conversation=self.conversation, type=self.output_type + ) + + def _initialize_agents(self, task: str) -> None: + """ + Initialize agents with their respective roles and context. + + Args: + task (str): The initial task/topic for context. 
+ """ + # Initialize Pro agent + pro_intro = ( + f"You are {self.pro_agent.agent_name}, arguing in favor (Pro position) " + f"of the topic: {task}. Your role is to present strong, well-reasoned " + f"arguments supporting your position. You will debate against " + f"{self.con_agent.agent_name}, who will argue against your position. " + f"A judge ({self.judge_agent.agent_name}) will evaluate both arguments " + f"and provide synthesis. Present compelling evidence and reasoning." + ) + self.pro_agent.run(task=pro_intro) + + # Initialize Con agent + con_intro = ( + f"You are {self.con_agent.agent_name}, arguing against (Con position) " + f"of the topic: {task}. Your role is to present strong, well-reasoned " + f"counter-arguments. You will debate against {self.pro_agent.agent_name}, " + f"who will argue in favor. A judge ({self.judge_agent.agent_name}) will " + f"evaluate both arguments and provide synthesis. Present compelling " + f"counter-evidence and reasoning." + ) + self.con_agent.run(task=con_intro) + + # Initialize Judge agent + judge_intro = ( + f"You are {self.judge_agent.agent_name}, an impartial judge evaluating " + f"a debate between {self.pro_agent.agent_name} (Pro) and " + f"{self.con_agent.agent_name} (Con) on the topic: {task}. " + f"Your role is to carefully evaluate both arguments, identify strengths " + f"and weaknesses, and provide a refined synthesis that incorporates the " + f"best elements from both sides. You may declare a winner or provide a " + f"balanced synthesis. Your output will be used to refine the discussion " + f"in subsequent rounds." + ) + self.judge_agent.run(task=judge_intro) + + def _create_pro_prompt(self, topic: str, round_num: int) -> str: + """ + Create the prompt for the Pro agent. + + Args: + topic (str): The current topic or refined question. + round_num (int): The current round number (0-indexed). + + Returns: + str: The prompt for the Pro agent. 
+ """ + if round_num == 0: + return ( + f"Present your argument in favor of: {topic}\n\n" + f"Provide a strong, well-reasoned argument with evidence and examples." + ) + else: + return ( + f"Round {round_num + 1}: Based on the judge's previous evaluation, " + f"present an improved argument in favor of: {topic}\n\n" + f"Address any weaknesses identified and strengthen your position " + f"with additional evidence and reasoning." + ) + + def _create_con_prompt( + self, topic: str, pro_argument: str, round_num: int + ) -> str: + """ + Create the prompt for the Con agent. + + Args: + topic (str): The current topic or refined question. + pro_argument (str): The Pro agent's argument to counter. + round_num (int): The current round number (0-indexed). + + Returns: + str: The prompt for the Con agent. + """ + if round_num == 0: + return ( + f"Present your counter-argument against: {topic}\n\n" + f"Pro's argument:\n{pro_argument}\n\n" + f"Provide a strong, well-reasoned counter-argument that addresses " + f"the Pro's points and presents evidence against the position." + ) + else: + return ( + f"Round {round_num + 1}: Based on the judge's previous evaluation, " + f"present an improved counter-argument against: {topic}\n\n" + f"Pro's current argument:\n{pro_argument}\n\n" + f"Address any weaknesses identified and strengthen your counter-position " + f"with additional evidence and reasoning." + ) + + def _create_judge_prompt( + self, + topic: str, + pro_argument: str, + con_argument: str, + round_num: int, + ) -> str: + """ + Create the prompt for the Judge agent. + + Args: + topic (str): The current topic or refined question. + pro_argument (str): The Pro agent's argument. + con_argument (str): The Con agent's argument. + round_num (int): The current round number (0-indexed). + + Returns: + str: The prompt for the Judge agent. 
+ """ + is_final_round = round_num == self.max_rounds - 1 + + prompt = ( + f"Round {round_num + 1}/{self.max_rounds}: Evaluate the debate on: {topic}\n\n" + f"Pro's argument ({self.pro_agent.agent_name}):\n{pro_argument}\n\n" + f"Con's argument ({self.con_agent.agent_name}):\n{con_argument}\n\n" + ) + + if is_final_round: + prompt += ( + "This is the final round. Provide a comprehensive final evaluation:\n" + "- Identify the strongest points from both sides\n" + "- Determine a winner OR provide a balanced synthesis\n" + "- Present a refined, well-reasoned answer that incorporates the best " + "elements from both arguments\n" + "- This will be the final output of the debate" + ) + else: + prompt += ( + "Evaluate both arguments and provide:\n" + "- Assessment of strengths and weaknesses in each argument\n" + "- A refined synthesis that incorporates the best elements from both sides\n" + "- Specific feedback for improvement in the next round\n" + "- Your synthesis will be used as the topic for the next round" + ) + + return prompt + + def get_conversation_history(self) -> List[dict]: + """ + Get the full conversation history. + + Returns: + List[dict]: List of message dictionaries containing the conversation history. + """ + return self.conversation.return_messages_as_list() + + def get_final_answer(self) -> str: + """ + Get the final refined answer from the judge. + + Returns: + str: The content of the final judge synthesis. + """ + return self.conversation.get_final_message_content() + + def batched_run(self, tasks: List[str]) -> List[str]: + """ + Run the debate with judge refinement process for a batch of tasks. + + Args: + tasks (List[str]): The list of tasks to run the debate with judge refinement process for. + + Returns: + List[str]: The list of final refined answers. 
+ """ + return [self.run(task) for task in tasks] From da9618df0efa43e5143004d0b5b4b7b275406653 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 19 Nov 2025 17:37:09 -0800 Subject: [PATCH 16/42] [DOCS][SwarmRouter] --- docs/swarms/structs/swarm_router.md | 182 ++++++++++++------ example.py | 6 +- .../business_strategy_debate_example.py | 1 - .../debate_examples/policy_debate_example.py | 1 - .../technical_architecture_debate_example.py | 1 - swarms/structs/agent.py | 154 +++++++++------ swarms/structs/debate_with_judge.py | 2 +- swarms/structs/swarm_router.py | 11 +- tests/structs/test_agent_stream_token.py | 4 +- 9 files changed, 233 insertions(+), 129 deletions(-) diff --git a/docs/swarms/structs/swarm_router.md b/docs/swarms/structs/swarm_router.md index 2a5deeb1..8ccf1203 100644 --- a/docs/swarms/structs/swarm_router.md +++ b/docs/swarms/structs/swarm_router.md @@ -4,13 +4,13 @@ The `SwarmRouter` class is a flexible routing system designed to manage differen Full Path: `from swarms.structs.swarm_router` - ## Initialization Parameters Main class for routing tasks to different swarm types. | Attribute | Type | Description | | --- | --- | --- | +| `id` | str | Unique identifier for the SwarmRouter instance (auto-generated if not provided) | | `name` | str | Name of the SwarmRouter instance | | `description` | str | Description of the SwarmRouter's purpose | | `max_loops` | int | Maximum number of loops to perform | @@ -24,35 +24,84 @@ Main class for routing tasks to different swarm types. 
| `rules` | str | Rules to inject into every agent | | `documents` | List[str] | List of document file paths | | `output_type` | OutputType | Output format type (e.g., "string", "dict", "list", "json", "yaml", "xml") | -| `no_cluster_ops` | bool | Flag to disable cluster operations | | `speaker_fn` | callable | Speaker function for GroupChat swarm type | | `load_agents_from_csv` | bool | Flag to enable/disable loading agents from CSV | | `csv_file_path` | str | Path to the CSV file for loading agents | | `return_entire_history` | bool | Flag to enable/disable returning the entire conversation history | | `multi_agent_collab_prompt` | bool | Whether to enable multi-agent collaboration prompts | +| `list_all_agents` | bool | Flag to enable/disable listing all agents to each other | +| `conversation` | Any | Conversation object for managing agent interactions | +| `agents_config` | Optional[Dict[Any, Any]] | Configuration dictionary for agents | +| `speaker_function` | str | Speaker function name for InteractiveGroupChat swarm type | +| `heavy_swarm_loops_per_agent` | int | Number of loops per agent for HeavySwarm (default: 1) | +| `heavy_swarm_question_agent_model_name` | str | Model name for the question agent in HeavySwarm (default: "gpt-4.1") | +| `heavy_swarm_worker_model_name` | str | Model name for worker agents in HeavySwarm (default: "gpt-4.1") | +| `heavy_swarm_swarm_show_output` | bool | Flag to show output for HeavySwarm (default: True) | +| `telemetry_enabled` | bool | Flag to enable/disable telemetry logging (default: False) | +| `council_judge_model_name` | str | Model name for the judge in CouncilAsAJudge (default: "gpt-4o-mini") | +| `verbose` | bool | Flag to enable/disable verbose logging (default: False) | +| `worker_tools` | List[Callable] | List of tools available to worker agents | +| `aggregation_strategy` | str | Aggregation strategy for HeavySwarm (default: "synthesis") | -#### Methods: +### Methods -| Method | Parameters | Description | -| 
--- | --- | --- | -| `__init__` | `name: str = "swarm-router", description: str = "Routes your task to the desired swarm", max_loops: int = 1, agents: List[Union[Agent, Callable]] = [], swarm_type: SwarmType = "SequentialWorkflow", autosave: bool = False, rearrange_flow: str = None, return_json: bool = False, auto_generate_prompts: bool = False, shared_memory_system: Any = None, rules: str = None, documents: List[str] = [], output_type: OutputType = "dict", no_cluster_ops: bool = False, speaker_fn: callable = None, load_agents_from_csv: bool = False, csv_file_path: str = None, return_entire_history: bool = True, multi_agent_collab_prompt: bool = True` | Initialize the SwarmRouter | -| `setup` | None | Set up the SwarmRouter by activating APE and handling shared memory and rules | -| `activate_shared_memory` | None | Activate shared memory with all agents | -| `handle_rules` | None | Inject rules to every agent | -| `activate_ape` | None | Activate automatic prompt engineering for agents that support it | -| `reliability_check` | None | Perform reliability checks on the SwarmRouter configuration | -| `_create_swarm` | `task: str = None, *args, **kwargs` | Create and return the specified swarm type | -| `update_system_prompt_for_agent_in_swarm` | None | Update system prompts for all agents with collaboration prompts | -| `_log` | `level: str, message: str, task: str = "", metadata: Dict[str, Any] = None` | Create a log entry | -| `_run` | `task: str, img: Optional[str] = None, model_response: Optional[str] = None, *args, **kwargs` | Run the specified task on the selected swarm type | -| `run` | `task: str, img: Optional[str] = None, model_response: Optional[str] = None, *args, **kwargs` | Execute a task on the selected swarm type | -| `__call__` | `task: str, *args, **kwargs` | Make the SwarmRouter instance callable | -| `batch_run` | `tasks: List[str], *args, **kwargs` | Execute multiple tasks in sequence | -| `async_run` | `task: str, *args, **kwargs` | Execute a 
task asynchronously | -| `get_logs` | None | Retrieve all logged entries | -| `concurrent_run` | `task: str, *args, **kwargs` | Execute a task using concurrent execution | -| `concurrent_batch_run` | `tasks: List[str], *args, **kwargs` | Execute multiple tasks concurrently | +#### `run()` + +Execute a task on the selected swarm type. + +**Input Parameters:** + +| Parameter | Type | Required | Default | Description | +| --- | --- | --- | --- | --- | +| `task` | `Optional[str]` | No | `None` | The task to be executed by the swarm | +| `img` | `Optional[str]` | No | `None` | Path to an image file for vision tasks | +| `tasks` | `Optional[List[str]]` | No | `None` | List of tasks (used for BatchedGridWorkflow) | +| `*args` | `Any` | No | - | Variable length argument list | +| `**kwargs` | `Any` | No | - | Arbitrary keyword arguments | + +**Output:** + +| Type | Description | +| --- | --- | +| `Any` | The result of the swarm's execution. The exact type depends on the `output_type` configuration (e.g., `str`, `dict`, `list`, `json`, `yaml`, `xml`) | + +**Example:** + +```python +result = router.run( + task="Analyze the market trends and provide recommendations", + img="chart.png" # Optional +) +``` + +--- + +### `batch_run()` + +Execute multiple tasks in sequence on the selected swarm type. + +**Input Parameters:** +| Parameter | Type | Required | Default | Description | +| --- | --- | --- | --- | --- | +| `tasks` | `List[str]` | Yes | - | List of tasks to be executed sequentially | +| `img` | `Optional[str]` | No | `None` | Path to an image file for vision tasks | +| `imgs` | `Optional[List[str]]` | No | `None` | List of image file paths for vision tasks | +| `*args` | `Any` | No | - | Variable length argument list | +| `**kwargs` | `Any` | No | - | Arbitrary keyword arguments | + +**Output:** + +| Type | Description | +| --- | --- | +| `List[Any]` | A list of results from the swarm's execution, one result per task. 
Each result type depends on the `output_type` configuration | + +**Example:** + +```python +tasks = ["Analyze Q1 report", "Summarize competitor landscape", "Evaluate market trends"] +results = router.batch_run(tasks, img="report.png") # Optional img parameter +``` ## Available Swarm Types @@ -62,7 +111,6 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap |------------|-------------| | `AgentRearrange` | Optimizes agent arrangement for task execution | | `MixtureOfAgents` | Combines multiple agent types for diverse tasks | -| `SpreadSheetSwarm` | Uses spreadsheet-like operations for task management | | `SequentialWorkflow` | Executes tasks sequentially | | `ConcurrentWorkflow` | Executes tasks in parallel | | `GroupChat` | Facilitates communication among agents in a group chat format | @@ -73,10 +121,10 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap | `MALT` | Multi-Agent Language Tasks | | `CouncilAsAJudge` | Council-based judgment system | | `InteractiveGroupChat` | Interactive group chat with user participation | +| `HeavySwarm` | Heavy swarm architecture with question and worker agents | +| `BatchedGridWorkflow` | Batched grid workflow for parallel task processing | | `auto` | Automatically selects best swarm type via embedding search | - - ## Basic Usage ```python @@ -129,9 +177,13 @@ router = SwarmRouter( if __name__ == "__main__": # Run a comprehensive private equity document analysis task result = router.run( - "Where is the best place to find template term sheets for series A startups? Provide links and references" + task="Where is the best place to find template term sheets for series A startups? 
Provide links and references", + img=None # Optional: provide image path for vision tasks ) print(result) + + # For BatchedGridWorkflow, you can pass multiple tasks: + # result = router.run(tasks=["Task 1", "Task 2", "Task 3"]) ``` ## Advanced Usage @@ -225,22 +277,6 @@ mixture_router = SwarmRouter( result = mixture_router.run("Evaluate the potential acquisition of TechStartup Inc.") ``` -### SpreadSheetSwarm - -Use Case: Collaborative data processing and analysis. - -```python -spreadsheet_router = SwarmRouter( - name="DataProcessor", - description="Collaborative data processing and analysis", - max_loops=1, - agents=[data_cleaner, statistical_analyzer, visualizer], - swarm_type="SpreadSheetSwarm" -) - -result = spreadsheet_router.run("Process and visualize customer churn data") -``` - ### SequentialWorkflow Use Case: Step-by-step document analysis and report generation. @@ -379,6 +415,47 @@ result = interactive_chat_router.run("Discuss the market trends and provide inte The InteractiveGroupChat allows for dynamic interaction between agents and users, enabling real-time participation in group discussions and decision-making processes. This is particularly useful for scenarios requiring human input or validation during the conversation flow. +### HeavySwarm + +Use Case: Complex task decomposition with question and worker agents. 
+ +```python +heavy_swarm_router = SwarmRouter( + name="HeavySwarm", + description="Complex task decomposition and execution", + swarm_type="HeavySwarm", + heavy_swarm_loops_per_agent=2, + heavy_swarm_question_agent_model_name="gpt-4.1", + heavy_swarm_worker_model_name="gpt-4.1", + heavy_swarm_swarm_show_output=True, + worker_tools=[tool1, tool2], + aggregation_strategy="synthesis", + output_type="string" +) + +result = heavy_swarm_router.run("Analyze market trends and provide comprehensive recommendations") +``` + +HeavySwarm uses a question agent to decompose complex tasks and worker agents to execute subtasks, making it ideal for complex problem-solving scenarios. + +### BatchedGridWorkflow + +Use Case: Parallel processing of multiple tasks in a batched grid format. + +```python +batched_grid_router = SwarmRouter( + name="BatchedGridWorkflow", + description="Process multiple tasks in parallel batches", + max_loops=1, + agents=[agent1, agent2, agent3], + swarm_type="BatchedGridWorkflow" +) + +result = batched_grid_router.run(tasks=["Task 1", "Task 2", "Task 3"]) +``` + +BatchedGridWorkflow is designed for efficiently processing multiple tasks in parallel batches, optimizing resource utilization. 
+ ## Advanced Features ### Processing Documents @@ -402,15 +479,7 @@ To process multiple tasks in a batch: ```python tasks = ["Analyze Q1 report", "Summarize competitor landscape", "Evaluate market trends"] -results = router.batch_run(tasks) -``` - -### Asynchronous Execution - -For asynchronous task execution: - -```python -result = await router.async_run("Generate financial projections") +results = router.batch_run(tasks, img="image.png") # Optional: img parameter for image tasks ``` ### Concurrent Execution @@ -418,16 +487,7 @@ result = await router.async_run("Generate financial projections") To run a single task concurrently: ```python -result = router.concurrent_run("Analyze multiple data streams") -``` - -### Concurrent Batch Processing - -To process multiple tasks concurrently: - -```python -tasks = ["Task 1", "Task 2", "Task 3"] -results = router.concurrent_batch_run(tasks) +result = router.concurrent_run("Analyze multiple data streams", img="image.png") # Optional: img parameter ``` ### Using the SwarmRouter as a Callable diff --git a/example.py b/example.py index 0a27c20c..42959ded 100644 --- a/example.py +++ b/example.py @@ -1,4 +1,3 @@ -import json from swarms import Agent @@ -12,7 +11,7 @@ agent = Agent( dynamic_context_window=True, streaming_on=False, top_p=None, - output_type="dict", + stream=True, ) out = agent.run( @@ -20,4 +19,5 @@ out = agent.run( n=1, ) -print(json.dumps(out, indent=4)) +for token in out: + print(token, end="", flush=True) diff --git a/examples/multi_agent/debate_examples/business_strategy_debate_example.py b/examples/multi_agent/debate_examples/business_strategy_debate_example.py index 66ee8b62..7dd44c11 100644 --- a/examples/multi_agent/debate_examples/business_strategy_debate_example.py +++ b/examples/multi_agent/debate_examples/business_strategy_debate_example.py @@ -84,4 +84,3 @@ print(history) # Get the final refined answer final_answer = strategy_debate.get_final_answer() print(final_answer) - diff --git 
a/examples/multi_agent/debate_examples/policy_debate_example.py b/examples/multi_agent/debate_examples/policy_debate_example.py index e8e744c5..a2e7c5ce 100644 --- a/examples/multi_agent/debate_examples/policy_debate_example.py +++ b/examples/multi_agent/debate_examples/policy_debate_example.py @@ -80,4 +80,3 @@ print(result) # Get the final refined answer final_answer = debate_system.get_final_answer() print(final_answer) - diff --git a/examples/multi_agent/debate_examples/technical_architecture_debate_example.py b/examples/multi_agent/debate_examples/technical_architecture_debate_example.py index e59bfa53..24ecf3d1 100644 --- a/examples/multi_agent/debate_examples/technical_architecture_debate_example.py +++ b/examples/multi_agent/debate_examples/technical_architecture_debate_example.py @@ -68,4 +68,3 @@ results = architecture_debate.batched_run(architecture_questions) # Display results for result in results: print(result) - diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 29222e1f..32687894 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -2584,91 +2584,129 @@ class Agent: task=task, *args, **kwargs ) - if hasattr(streaming_response, "__iter__") and not isinstance(streaming_response, str): + if hasattr( + streaming_response, "__iter__" + ) and not isinstance(streaming_response, str): complete_response = "" token_count = 0 final_chunk = None first_chunk = None - + for chunk in streaming_response: if first_chunk is None: first_chunk = chunk - - if hasattr(chunk, "choices") and chunk.choices[0].delta.content: + + if ( + hasattr(chunk, "choices") + and chunk.choices[0].delta.content + ): content = chunk.choices[0].delta.content complete_response += content token_count += 1 - + # Schema per token outputted token_info = { "token_index": token_count, - "model": getattr(chunk, 'model', self.get_current_model()), - "id": getattr(chunk, 'id', ''), - "created": getattr(chunk, 'created', int(time.time())), - "object": getattr(chunk, 
'object', 'chat.completion.chunk'), + "model": getattr( + chunk, + "model", + self.get_current_model(), + ), + "id": getattr(chunk, "id", ""), + "created": getattr( + chunk, "created", int(time.time()) + ), + "object": getattr( + chunk, + "object", + "chat.completion.chunk", + ), "token": content, - "system_fingerprint": getattr(chunk, 'system_fingerprint', ''), - "finish_reason": chunk.choices[0].finish_reason, - "citations": getattr(chunk, 'citations', None), - "provider_specific_fields": getattr(chunk, 'provider_specific_fields', None), - "service_tier": getattr(chunk, 'service_tier', 'default'), - "obfuscation": getattr(chunk, 'obfuscation', None), - "usage": getattr(chunk, 'usage', None), + "system_fingerprint": getattr( + chunk, "system_fingerprint", "" + ), + "finish_reason": chunk.choices[ + 0 + ].finish_reason, + "citations": getattr( + chunk, "citations", None + ), + "provider_specific_fields": getattr( + chunk, + "provider_specific_fields", + None, + ), + "service_tier": getattr( + chunk, "service_tier", "default" + ), + "obfuscation": getattr( + chunk, "obfuscation", None + ), + "usage": getattr( + chunk, "usage", None + ), "logprobs": chunk.choices[0].logprobs, - "timestamp": time.time() + "timestamp": time.time(), } - + print(f"ResponseStream {token_info}") - + if streaming_callback is not None: streaming_callback(token_info) - + final_chunk = chunk - - #Final ModelResponse to stream - if final_chunk and hasattr(final_chunk, 'usage') and final_chunk.usage: + + # Final ModelResponse to stream + if ( + final_chunk + and hasattr(final_chunk, "usage") + and final_chunk.usage + ): usage = final_chunk.usage - print(f"ModelResponseStream(id='{getattr(final_chunk, 'id', 'N/A')}', " - f"created={getattr(final_chunk, 'created', 'N/A')}, " - f"model='{getattr(final_chunk, 'model', self.get_current_model())}', " - f"object='{getattr(final_chunk, 'object', 'chat.completion.chunk')}', " - f"system_fingerprint='{getattr(final_chunk, 'system_fingerprint', 'N/A')}', " 
- f"choices=[StreamingChoices(finish_reason='{final_chunk.choices[0].finish_reason}', " - f"index=0, delta=Delta(provider_specific_fields=None, content=None, role=None, " - f"function_call=None, tool_calls=None, audio=None), logprobs=None)], " - f"provider_specific_fields=None, " - f"usage=Usage(completion_tokens={usage.completion_tokens}, " - f"prompt_tokens={usage.prompt_tokens}, " - f"total_tokens={usage.total_tokens}, " - f"completion_tokens_details=CompletionTokensDetailsWrapper(" - f"accepted_prediction_tokens={usage.completion_tokens_details.accepted_prediction_tokens}, " - f"audio_tokens={usage.completion_tokens_details.audio_tokens}, " - f"reasoning_tokens={usage.completion_tokens_details.reasoning_tokens}, " - f"rejected_prediction_tokens={usage.completion_tokens_details.rejected_prediction_tokens}, " - f"text_tokens={usage.completion_tokens_details.text_tokens}), " - f"prompt_tokens_details=PromptTokensDetailsWrapper(" - f"audio_tokens={usage.prompt_tokens_details.audio_tokens}, " - f"cached_tokens={usage.prompt_tokens_details.cached_tokens}, " - f"text_tokens={usage.prompt_tokens_details.text_tokens}, " - f"image_tokens={usage.prompt_tokens_details.image_tokens})))") + print( + f"ModelResponseStream(id='{getattr(final_chunk, 'id', 'N/A')}', " + f"created={getattr(final_chunk, 'created', 'N/A')}, " + f"model='{getattr(final_chunk, 'model', self.get_current_model())}', " + f"object='{getattr(final_chunk, 'object', 'chat.completion.chunk')}', " + f"system_fingerprint='{getattr(final_chunk, 'system_fingerprint', 'N/A')}', " + f"choices=[StreamingChoices(finish_reason='{final_chunk.choices[0].finish_reason}', " + f"index=0, delta=Delta(provider_specific_fields=None, content=None, role=None, " + f"function_call=None, tool_calls=None, audio=None), logprobs=None)], " + f"provider_specific_fields=None, " + f"usage=Usage(completion_tokens={usage.completion_tokens}, " + f"prompt_tokens={usage.prompt_tokens}, " + f"total_tokens={usage.total_tokens}, " + 
f"completion_tokens_details=CompletionTokensDetailsWrapper(" + f"accepted_prediction_tokens={usage.completion_tokens_details.accepted_prediction_tokens}, " + f"audio_tokens={usage.completion_tokens_details.audio_tokens}, " + f"reasoning_tokens={usage.completion_tokens_details.reasoning_tokens}, " + f"rejected_prediction_tokens={usage.completion_tokens_details.rejected_prediction_tokens}, " + f"text_tokens={usage.completion_tokens_details.text_tokens}), " + f"prompt_tokens_details=PromptTokensDetailsWrapper(" + f"audio_tokens={usage.prompt_tokens_details.audio_tokens}, " + f"cached_tokens={usage.prompt_tokens_details.cached_tokens}, " + f"text_tokens={usage.prompt_tokens_details.text_tokens}, " + f"image_tokens={usage.prompt_tokens_details.image_tokens})))" + ) else: - print(f"ModelResponseStream(id='{getattr(final_chunk, 'id', 'N/A')}', " - f"created={getattr(final_chunk, 'created', 'N/A')}, " - f"model='{getattr(final_chunk, 'model', self.get_current_model())}', " - f"object='{getattr(final_chunk, 'object', 'chat.completion.chunk')}', " - f"system_fingerprint='{getattr(final_chunk, 'system_fingerprint', 'N/A')}', " - f"choices=[StreamingChoices(finish_reason='{final_chunk.choices[0].finish_reason}', " - f"index=0, delta=Delta(provider_specific_fields=None, content=None, role=None, " - f"function_call=None, tool_calls=None, audio=None), logprobs=None)], " - f"provider_specific_fields=None)") - - + print( + f"ModelResponseStream(id='{getattr(final_chunk, 'id', 'N/A')}', " + f"created={getattr(final_chunk, 'created', 'N/A')}, " + f"model='{getattr(final_chunk, 'model', self.get_current_model())}', " + f"object='{getattr(final_chunk, 'object', 'chat.completion.chunk')}', " + f"system_fingerprint='{getattr(final_chunk, 'system_fingerprint', 'N/A')}', " + f"choices=[StreamingChoices(finish_reason='{final_chunk.choices[0].finish_reason}', " + f"index=0, delta=Delta(provider_specific_fields=None, content=None, role=None, " + f"function_call=None, tool_calls=None, 
audio=None), logprobs=None)], " + f"provider_specific_fields=None)" + ) + self.llm.stream = original_stream return complete_response else: self.llm.stream = original_stream return streaming_response - + elif self.streaming_on and hasattr(self.llm, "stream"): original_stream = self.llm.stream self.llm.stream = True diff --git a/swarms/structs/debate_with_judge.py b/swarms/structs/debate_with_judge.py index c77372e0..e3104198 100644 --- a/swarms/structs/debate_with_judge.py +++ b/swarms/structs/debate_with_judge.py @@ -332,7 +332,7 @@ class DebateWithJudge: str: The content of the final judge synthesis. """ return self.conversation.get_final_message_content() - + def batched_run(self, tasks: List[str]) -> List[str]: """ Run the debate with judge refinement process for a batch of tasks. diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index b078d970..92903f57 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -2,7 +2,16 @@ import concurrent.futures import json import os import traceback -from typing import Any, Callable, Dict, List, Literal, Optional, Union, get_args +from typing import ( + Any, + Callable, + Dict, + List, + Literal, + Optional, + Union, + get_args, +) from pydantic import BaseModel, Field diff --git a/tests/structs/test_agent_stream_token.py b/tests/structs/test_agent_stream_token.py index 5cd02207..0e146d75 100644 --- a/tests/structs/test_agent_stream_token.py +++ b/tests/structs/test_agent_stream_token.py @@ -1,8 +1,8 @@ from swarms.structs.agent import Agent agent = Agent( - model_name="gpt-4.1", - max_loops=1, + model_name="gpt-4.1", + max_loops=1, stream=True, ) From d817ee791361014dd5ba1ed8061a06cbf5eecfee Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 19:05:03 -0800 Subject: [PATCH 17/42] added run_async to agent rearrange and handeled test sequantil workflow init --- swarms/structs/agent_rearrange.py | 40 +++++++++++++++++++++++ 
tests/structs/test_sequential_workflow.py | 14 ++------ 2 files changed, 43 insertions(+), 11 deletions(-) diff --git a/swarms/structs/agent_rearrange.py b/swarms/structs/agent_rearrange.py index d3016de4..c962a518 100644 --- a/swarms/structs/agent_rearrange.py +++ b/swarms/structs/agent_rearrange.py @@ -908,6 +908,46 @@ class AgentRearrange: except Exception as e: self._catch_error(e) + async def run_async( + self, + task: str, + img: Optional[str] = None, + *args, + **kwargs, + ) -> Any: + """ + Asynchronously executes a task through the agent workflow. + + This method enables asynchronous execution of tasks by running the + synchronous run method in a separate thread using asyncio.to_thread. + This is ideal for integrating the agent workflow into async applications + or when you want non-blocking execution. + + Args: + task (str): The task to be executed through the agent workflow. + img (Optional[str]): Optional image input for the task. Defaults to None. + *args: Additional positional arguments passed to the run method. + **kwargs: Additional keyword arguments passed to the run method. + + Returns: + Any: The result of the task execution, format depends on output_type setting. + + Raises: + Exception: If an error occurs during task execution. + + Note: + This method uses asyncio.to_thread to run the synchronous run method + asynchronously, allowing integration with async/await patterns. 
+ """ + import asyncio + + try: + return await asyncio.to_thread( + self.run, task=task, img=img, *args, **kwargs + ) + except Exception as e: + self._catch_error(e) + def _serialize_callable( self, attr_value: Callable ) -> Dict[str, Any]: diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index 6d8f74a1..f905fe47 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -5,17 +5,9 @@ from swarms import Agent, SequentialWorkflow # Test SequentialWorkflow class def test_sequential_workflow_initialization(): - workflow = SequentialWorkflow() - assert isinstance(workflow, SequentialWorkflow) - assert len(workflow.tasks) == 0 - assert workflow.max_loops == 1 - assert workflow.autosave is False - assert ( - workflow.saved_state_filepath - == "sequential_workflow_state.json" - ) - assert workflow.restore_state_filepath is None - assert workflow.dashboard is False + # SequentialWorkflow requires agents, so expect ValueError + with pytest.raises(ValueError, match="Agents list cannot be None or empty"): + workflow = SequentialWorkflow() def test_sequential_workflow_initialization_with_agents(): From 4635903c84227334f1db0f427519b430879b451c Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 19:19:18 -0800 Subject: [PATCH 18/42] added import asyncio to the top --- swarms/structs/agent_rearrange.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/swarms/structs/agent_rearrange.py b/swarms/structs/agent_rearrange.py index c962a518..a0155ef6 100644 --- a/swarms/structs/agent_rearrange.py +++ b/swarms/structs/agent_rearrange.py @@ -1,7 +1,7 @@ import json from concurrent.futures import ThreadPoolExecutor from typing import Any, Callable, Dict, List, Optional, Union - +import asyncio from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.structs.multi_agent_exec import run_agents_concurrently @@ -939,7 
+939,6 @@ class AgentRearrange: This method uses asyncio.to_thread to run the synchronous run method asynchronously, allowing integration with async/await patterns. """ - import asyncio try: return await asyncio.to_thread( From 682efcff53d327b8f243112d1d79398773bdf837 Mon Sep 17 00:00:00 2001 From: Hugh <155223694+hughiwnl@users.noreply.github.com> Date: Wed, 19 Nov 2025 19:32:13 -0800 Subject: [PATCH 19/42] tests for multi agent debate --- tests/structs/test_multi_agent_debate.py | 1097 ++++++++++++++++++++++ 1 file changed, 1097 insertions(+) create mode 100644 tests/structs/test_multi_agent_debate.py diff --git a/tests/structs/test_multi_agent_debate.py b/tests/structs/test_multi_agent_debate.py new file mode 100644 index 00000000..12737b3b --- /dev/null +++ b/tests/structs/test_multi_agent_debate.py @@ -0,0 +1,1097 @@ +import pytest +from loguru import logger +from swarms.structs.multi_agent_debates import ( + OneOnOneDebate, + ExpertPanelDiscussion, + RoundTableDiscussion, + InterviewSeries, + PeerReviewProcess, + MediationSession, + BrainstormingSession, + TrialSimulation, + CouncilMeeting, + MentorshipSession, + NegotiationSession, +) +from swarms.structs.agent import Agent + + +def create_function_agent(name: str, system_prompt: str = None): + if system_prompt is None: + system_prompt = f"You are {name}. Provide concise and direct responses." + + agent = Agent( + agent_name=name, + agent_description=f"Test agent {name}", + system_prompt=system_prompt, + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + return agent + + +@pytest.fixture +def sample_two_agents(): + agent1 = create_function_agent( + "Agent1", + "You are Agent1. Provide concise responses." + ) + agent2 = create_function_agent( + "Agent2", + "You are Agent2. Provide concise responses." 
+ ) + return [agent1, agent2] + + +@pytest.fixture +def sample_three_agents(): + agent1 = create_function_agent("Agent1") + agent2 = create_function_agent("Agent2") + agent3 = create_function_agent("Agent3") + return [agent1, agent2, agent3] + + +@pytest.fixture +def sample_task(): + return "What is 2+2?" + + +def test_one_on_one_debate_initialization(sample_two_agents): + try: + assert sample_two_agents is not None + debate = OneOnOneDebate( + max_loops=2, + agents=sample_two_agents, + output_type="str-all-except-first", + ) + assert debate is not None + assert debate.max_loops == 2 + assert len(debate.agents) == 2 + assert debate.output_type == "str-all-except-first" + logger.info("OneOnOneDebate initialization test passed") + except Exception as e: + logger.error(f"Failed to test OneOnOneDebate initialization: {e}") + raise + + +def test_one_on_one_debate_run(sample_two_agents, sample_task): + try: + assert sample_two_agents is not None + assert sample_task is not None + debate = OneOnOneDebate( + max_loops=2, + agents=sample_two_agents, + output_type="str-all-except-first", + ) + assert debate is not None + result = debate.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("OneOnOneDebate run test passed") + except Exception as e: + logger.error(f"Failed to test OneOnOneDebate run: {e}") + raise + + +def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_task): + try: + debate = OneOnOneDebate( + max_loops=2, + agents=sample_three_agents, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="exactly two agents"): + debate.run(sample_task) + logger.info("OneOnOneDebate wrong number of agents test passed") + except Exception as e: + logger.error(f"Failed to test OneOnOneDebate wrong number of agents: {e}") + raise + + +def test_one_on_one_debate_output_types(sample_two_agents, sample_task): + try: + assert sample_two_agents is not None + assert 
sample_task is not None + output_types = ["str-all-except-first", "list", "dict", "str"] + assert output_types is not None + for output_type in output_types: + debate = OneOnOneDebate( + max_loops=2, + agents=sample_two_agents, + output_type=output_type, + ) + assert debate is not None + result = debate.run(sample_task) + assert result is not None + if output_type == "list": + assert isinstance(result, list) + elif output_type == "dict": + assert isinstance(result, (dict, list)) + else: + assert isinstance(result, str) + logger.info("OneOnOneDebate output types test passed") + except Exception as e: + logger.error(f"Failed to test OneOnOneDebate output types: {e}") + raise + + +def test_one_on_one_debate_with_image(sample_two_agents): + try: + assert sample_two_agents is not None + task = "Analyze this image" + assert task is not None + img = "test_image.jpg" + assert img is not None + debate = OneOnOneDebate( + max_loops=2, + agents=sample_two_agents, + img=img, + output_type="str-all-except-first", + ) + assert debate is not None + result = debate.run(task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("OneOnOneDebate with image test passed") + except Exception as e: + logger.error(f"Failed to test OneOnOneDebate with image: {e}") + raise + + +def test_expert_panel_discussion_initialization(sample_three_agents): + try: + moderator = create_function_agent("Moderator") + assert moderator is not None + panel = ExpertPanelDiscussion( + max_rounds=2, + agents=sample_three_agents, + moderator=moderator, + output_type="str-all-except-first", + ) + assert panel is not None + assert panel.max_rounds == 2 + assert len(panel.agents) == 3 + assert panel.moderator is not None + logger.info("ExpertPanelDiscussion initialization test passed") + except Exception as e: + logger.error(f"Failed to test ExpertPanelDiscussion initialization: {e}") + raise + + +def test_expert_panel_discussion_run(sample_three_agents, 
sample_task): + try: + moderator = create_function_agent("Moderator") + assert moderator is not None + panel = ExpertPanelDiscussion( + max_rounds=2, + agents=sample_three_agents, + moderator=moderator, + output_type="str-all-except-first", + ) + assert panel is not None + result = panel.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("ExpertPanelDiscussion run test passed") + except Exception as e: + logger.error(f"Failed to test ExpertPanelDiscussion run: {e}") + raise + + +def test_expert_panel_discussion_insufficient_agents(sample_task): + try: + moderator = create_function_agent("Moderator") + assert moderator is not None + single_agent = [create_function_agent("Agent1")] + assert single_agent is not None + assert len(single_agent) > 0 + assert single_agent[0] is not None + panel = ExpertPanelDiscussion( + max_rounds=2, + agents=single_agent, + moderator=moderator, + output_type="str-all-except-first", + ) + assert panel is not None + with pytest.raises(ValueError, match="At least two expert agents"): + panel.run(sample_task) + logger.info("ExpertPanelDiscussion insufficient agents test passed") + except Exception as e: + logger.error(f"Failed to test ExpertPanelDiscussion insufficient agents: {e}") + raise + + +def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): + try: + panel = ExpertPanelDiscussion( + max_rounds=2, + agents=sample_three_agents, + moderator=None, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="moderator agent is required"): + panel.run(sample_task) + logger.info("ExpertPanelDiscussion no moderator test passed") + except Exception as e: + logger.error(f"Failed to test ExpertPanelDiscussion no moderator: {e}") + raise + + +def test_round_table_discussion_initialization(sample_three_agents): + try: + facilitator = create_function_agent("Facilitator") + assert facilitator is not None + round_table = 
RoundTableDiscussion( + max_cycles=2, + agents=sample_three_agents, + facilitator=facilitator, + output_type="str-all-except-first", + ) + assert round_table is not None + assert round_table.max_cycles == 2 + assert len(round_table.agents) == 3 + assert round_table.facilitator is not None + logger.info("RoundTableDiscussion initialization test passed") + except Exception as e: + logger.error(f"Failed to test RoundTableDiscussion initialization: {e}") + raise + + +def test_round_table_discussion_run(sample_three_agents, sample_task): + try: + facilitator = create_function_agent("Facilitator") + assert facilitator is not None + round_table = RoundTableDiscussion( + max_cycles=2, + agents=sample_three_agents, + facilitator=facilitator, + output_type="str-all-except-first", + ) + assert round_table is not None + result = round_table.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("RoundTableDiscussion run test passed") + except Exception as e: + logger.error(f"Failed to test RoundTableDiscussion run: {e}") + raise + + +def test_round_table_discussion_insufficient_agents(sample_task): + try: + facilitator = create_function_agent("Facilitator") + single_agent = [create_function_agent("Agent1")] + round_table = RoundTableDiscussion( + max_cycles=2, + agents=single_agent, + facilitator=facilitator, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="At least two participants"): + round_table.run(sample_task) + logger.info("RoundTableDiscussion insufficient agents test passed") + except Exception as e: + logger.error(f"Failed to test RoundTableDiscussion insufficient agents: {e}") + raise + + +def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task): + try: + round_table = RoundTableDiscussion( + max_cycles=2, + agents=sample_three_agents, + facilitator=None, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="facilitator 
agent is required"): + round_table.run(sample_task) + logger.info("RoundTableDiscussion no facilitator test passed") + except Exception as e: + logger.error(f"Failed to test RoundTableDiscussion no facilitator: {e}") + raise + + +def test_interview_series_initialization(): + try: + interviewer = create_function_agent("Interviewer") + assert interviewer is not None + interviewee = create_function_agent("Interviewee") + assert interviewee is not None + questions = ["Question 1", "Question 2"] + assert questions is not None + interview = InterviewSeries( + questions=questions, + interviewer=interviewer, + interviewee=interviewee, + follow_up_depth=1, + output_type="str-all-except-first", + ) + assert interview is not None + assert interview.questions == questions + assert interview.interviewer is not None + assert interview.interviewee is not None + assert interview.follow_up_depth == 1 + logger.info("InterviewSeries initialization test passed") + except Exception as e: + logger.error(f"Failed to test InterviewSeries initialization: {e}") + raise + + +def test_interview_series_run(sample_task): + try: + interviewer = create_function_agent("Interviewer") + assert interviewer is not None + interviewee = create_function_agent("Interviewee") + assert interviewee is not None + questions = ["Question 1", "Question 2"] + assert questions is not None + interview = InterviewSeries( + questions=questions, + interviewer=interviewer, + interviewee=interviewee, + follow_up_depth=1, + output_type="str-all-except-first", + ) + assert interview is not None + result = interview.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("InterviewSeries run test passed") + except Exception as e: + logger.error(f"Failed to test InterviewSeries run: {e}") + raise + + +def test_interview_series_no_interviewer(sample_task): + try: + interviewee = create_function_agent("Interviewee") + interview = InterviewSeries( + 
questions=["Question 1"], + interviewer=None, + interviewee=interviewee, + follow_up_depth=1, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="Both interviewer and interviewee"): + interview.run(sample_task) + logger.info("InterviewSeries no interviewer test passed") + except Exception as e: + logger.error(f"Failed to test InterviewSeries no interviewer: {e}") + raise + + +def test_interview_series_no_interviewee(sample_task): + try: + interviewer = create_function_agent("Interviewer") + interview = InterviewSeries( + questions=["Question 1"], + interviewer=interviewer, + interviewee=None, + follow_up_depth=1, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="Both interviewer and interviewee"): + interview.run(sample_task) + logger.info("InterviewSeries no interviewee test passed") + except Exception as e: + logger.error(f"Failed to test InterviewSeries no interviewee: {e}") + raise + + +def test_interview_series_default_questions(sample_task): + try: + interviewer = create_function_agent("Interviewer") + assert interviewer is not None + interviewee = create_function_agent("Interviewee") + assert interviewee is not None + assert sample_task is not None + interview = InterviewSeries( + questions=None, + interviewer=interviewer, + interviewee=interviewee, + follow_up_depth=1, + output_type="str-all-except-first", + ) + assert interview is not None + result = interview.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("InterviewSeries default questions test passed") + except Exception as e: + logger.error(f"Failed to test InterviewSeries default questions: {e}") + raise + + +def test_peer_review_process_initialization(): + try: + reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + assert reviewers is not None + assert len(reviewers) == 2 + assert reviewers[0] is not None + assert reviewers[1] is not 
None + author = create_function_agent("Author") + assert author is not None + peer_review = PeerReviewProcess( + reviewers=reviewers, + author=author, + review_rounds=2, + output_type="str-all-except-first", + ) + assert peer_review is not None + assert len(peer_review.reviewers) == 2 + assert peer_review.author is not None + assert peer_review.review_rounds == 2 + logger.info("PeerReviewProcess initialization test passed") + except Exception as e: + logger.error(f"Failed to test PeerReviewProcess initialization: {e}") + raise + + +def test_peer_review_process_run(sample_task): + try: + reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + assert reviewers is not None + assert len(reviewers) == 2 + author = create_function_agent("Author") + assert author is not None + peer_review = PeerReviewProcess( + reviewers=reviewers, + author=author, + review_rounds=2, + output_type="str-all-except-first", + ) + assert peer_review is not None + result = peer_review.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("PeerReviewProcess run test passed") + except Exception as e: + logger.error(f"Failed to test PeerReviewProcess run: {e}") + raise + + +def test_peer_review_process_no_reviewers(sample_task): + try: + author = create_function_agent("Author") + peer_review = PeerReviewProcess( + reviewers=[], + author=author, + review_rounds=2, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="At least one reviewer"): + peer_review.run(sample_task) + logger.info("PeerReviewProcess no reviewers test passed") + except Exception as e: + logger.error(f"Failed to test PeerReviewProcess no reviewers: {e}") + raise + + +def test_peer_review_process_no_author(sample_task): + try: + reviewers = [create_function_agent("Reviewer1")] + peer_review = PeerReviewProcess( + reviewers=reviewers, + author=None, + review_rounds=2, + output_type="str-all-except-first", + ) 
+ with pytest.raises(ValueError, match="author agent is required"): + peer_review.run(sample_task) + logger.info("PeerReviewProcess no author test passed") + except Exception as e: + logger.error(f"Failed to test PeerReviewProcess no author: {e}") + raise + + +def test_mediation_session_initialization(sample_two_agents): + try: + mediator = create_function_agent("Mediator") + assert mediator is not None + assert sample_two_agents is not None + mediation = MediationSession( + parties=sample_two_agents, + mediator=mediator, + max_sessions=2, + output_type="str-all-except-first", + ) + assert mediation is not None + assert len(mediation.parties) == 2 + assert mediation.mediator is not None + assert mediation.max_sessions == 2 + logger.info("MediationSession initialization test passed") + except Exception as e: + logger.error(f"Failed to test MediationSession initialization: {e}") + raise + + +def test_mediation_session_run(sample_two_agents, sample_task): + try: + mediator = create_function_agent("Mediator") + assert mediator is not None + assert sample_two_agents is not None + mediation = MediationSession( + parties=sample_two_agents, + mediator=mediator, + max_sessions=2, + output_type="str-all-except-first", + ) + assert mediation is not None + result = mediation.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("MediationSession run test passed") + except Exception as e: + logger.error(f"Failed to test MediationSession run: {e}") + raise + + +def test_mediation_session_insufficient_parties(sample_task): + try: + mediator = create_function_agent("Mediator") + single_party = [create_function_agent("Party1")] + mediation = MediationSession( + parties=single_party, + mediator=mediator, + max_sessions=2, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="At least two parties"): + mediation.run(sample_task) + logger.info("MediationSession insufficient parties test passed") 
+ except Exception as e: + logger.error(f"Failed to test MediationSession insufficient parties: {e}") + raise + + +def test_mediation_session_no_mediator(sample_two_agents, sample_task): + try: + mediation = MediationSession( + parties=sample_two_agents, + mediator=None, + max_sessions=2, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="mediator agent is required"): + mediation.run(sample_task) + logger.info("MediationSession no mediator test passed") + except Exception as e: + logger.error(f"Failed to test MediationSession no mediator: {e}") + raise + + +def test_brainstorming_session_initialization(sample_three_agents): + try: + facilitator = create_function_agent("Facilitator") + assert facilitator is not None + assert sample_three_agents is not None + brainstorming = BrainstormingSession( + participants=sample_three_agents, + facilitator=facilitator, + idea_rounds=2, + build_on_ideas=True, + output_type="str-all-except-first", + ) + assert brainstorming is not None + assert len(brainstorming.participants) == 3 + assert brainstorming.facilitator is not None + assert brainstorming.idea_rounds == 2 + assert brainstorming.build_on_ideas is True + logger.info("BrainstormingSession initialization test passed") + except Exception as e: + logger.error(f"Failed to test BrainstormingSession initialization: {e}") + raise + + +def test_brainstorming_session_run(sample_three_agents, sample_task): + try: + facilitator = create_function_agent("Facilitator") + assert facilitator is not None + assert sample_three_agents is not None + brainstorming = BrainstormingSession( + participants=sample_three_agents, + facilitator=facilitator, + idea_rounds=2, + build_on_ideas=True, + output_type="str-all-except-first", + ) + assert brainstorming is not None + result = brainstorming.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("BrainstormingSession run test passed") + except Exception 
as e: + logger.error(f"Failed to test BrainstormingSession run: {e}") + raise + + +def test_brainstorming_session_insufficient_participants(sample_task): + try: + facilitator = create_function_agent("Facilitator") + single_participant = [create_function_agent("Participant1")] + brainstorming = BrainstormingSession( + participants=single_participant, + facilitator=facilitator, + idea_rounds=2, + build_on_ideas=True, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="At least two participants"): + brainstorming.run(sample_task) + logger.info("BrainstormingSession insufficient participants test passed") + except Exception as e: + logger.error(f"Failed to test BrainstormingSession insufficient participants: {e}") + raise + + +def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): + try: + brainstorming = BrainstormingSession( + participants=sample_three_agents, + facilitator=None, + idea_rounds=2, + build_on_ideas=True, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="facilitator agent is required"): + brainstorming.run(sample_task) + logger.info("BrainstormingSession no facilitator test passed") + except Exception as e: + logger.error(f"Failed to test BrainstormingSession no facilitator: {e}") + raise + + +def test_trial_simulation_initialization(): + try: + prosecution = create_function_agent("Prosecution") + assert prosecution is not None + defense = create_function_agent("Defense") + assert defense is not None + judge = create_function_agent("Judge") + assert judge is not None + witnesses = [create_function_agent("Witness1")] + assert witnesses is not None + assert len(witnesses) == 1 + assert witnesses[0] is not None + trial = TrialSimulation( + prosecution=prosecution, + defense=defense, + judge=judge, + witnesses=witnesses, + phases=["opening", "closing"], + output_type="str-all-except-first", + ) + assert trial is not None + assert trial.prosecution is not None + assert 
trial.defense is not None + assert trial.judge is not None + assert len(trial.witnesses) == 1 + assert trial.phases == ["opening", "closing"] + logger.info("TrialSimulation initialization test passed") + except Exception as e: + logger.error(f"Failed to test TrialSimulation initialization: {e}") + raise + + +def test_trial_simulation_run(sample_task): + try: + prosecution = create_function_agent("Prosecution") + assert prosecution is not None + defense = create_function_agent("Defense") + assert defense is not None + judge = create_function_agent("Judge") + assert judge is not None + trial = TrialSimulation( + prosecution=prosecution, + defense=defense, + judge=judge, + witnesses=None, + phases=["opening", "closing"], + output_type="str-all-except-first", + ) + assert trial is not None + result = trial.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("TrialSimulation run test passed") + except Exception as e: + logger.error(f"Failed to test TrialSimulation run: {e}") + raise + + +def test_trial_simulation_no_prosecution(sample_task): + try: + defense = create_function_agent("Defense") + judge = create_function_agent("Judge") + trial = TrialSimulation( + prosecution=None, + defense=defense, + judge=judge, + witnesses=None, + phases=["opening"], + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="all required"): + trial.run(sample_task) + logger.info("TrialSimulation no prosecution test passed") + except Exception as e: + logger.error(f"Failed to test TrialSimulation no prosecution: {e}") + raise + + +def test_trial_simulation_default_phases(sample_task): + try: + prosecution = create_function_agent("Prosecution") + assert prosecution is not None + defense = create_function_agent("Defense") + assert defense is not None + judge = create_function_agent("Judge") + assert judge is not None + assert sample_task is not None + trial = TrialSimulation( + 
prosecution=prosecution, + defense=defense, + judge=judge, + witnesses=None, + phases=None, + output_type="str-all-except-first", + ) + assert trial is not None + result = trial.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("TrialSimulation default phases test passed") + except Exception as e: + logger.error(f"Failed to test TrialSimulation default phases: {e}") + raise + + +def test_council_meeting_initialization(sample_three_agents): + try: + chairperson = create_function_agent("Chairperson") + assert chairperson is not None + assert sample_three_agents is not None + council = CouncilMeeting( + council_members=sample_three_agents, + chairperson=chairperson, + voting_rounds=2, + require_consensus=False, + output_type="str-all-except-first", + ) + assert council is not None + assert len(council.council_members) == 3 + assert council.chairperson is not None + assert council.voting_rounds == 2 + assert council.require_consensus is False + logger.info("CouncilMeeting initialization test passed") + except Exception as e: + logger.error(f"Failed to test CouncilMeeting initialization: {e}") + raise + + +def test_council_meeting_run(sample_three_agents, sample_task): + try: + chairperson = create_function_agent("Chairperson") + assert chairperson is not None + assert sample_three_agents is not None + council = CouncilMeeting( + council_members=sample_three_agents, + chairperson=chairperson, + voting_rounds=1, + require_consensus=False, + output_type="str-all-except-first", + ) + assert council is not None + result = council.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("CouncilMeeting run test passed") + except Exception as e: + logger.error(f"Failed to test CouncilMeeting run: {e}") + raise + + +def test_council_meeting_insufficient_members(sample_task): + try: + chairperson = create_function_agent("Chairperson") + single_member 
= [create_function_agent("Member1")] + council = CouncilMeeting( + council_members=single_member, + chairperson=chairperson, + voting_rounds=1, + require_consensus=False, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="At least two council members"): + council.run(sample_task) + logger.info("CouncilMeeting insufficient members test passed") + except Exception as e: + logger.error(f"Failed to test CouncilMeeting insufficient members: {e}") + raise + + +def test_council_meeting_no_chairperson(sample_three_agents, sample_task): + try: + council = CouncilMeeting( + council_members=sample_three_agents, + chairperson=None, + voting_rounds=1, + require_consensus=False, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="chairperson agent is required"): + council.run(sample_task) + logger.info("CouncilMeeting no chairperson test passed") + except Exception as e: + logger.error(f"Failed to test CouncilMeeting no chairperson: {e}") + raise + + +def test_mentorship_session_initialization(): + try: + mentor = create_function_agent("Mentor") + assert mentor is not None + mentee = create_function_agent("Mentee") + assert mentee is not None + mentorship = MentorshipSession( + mentor=mentor, + mentee=mentee, + session_count=2, + include_feedback=True, + output_type="str-all-except-first", + ) + assert mentorship is not None + assert mentorship.mentor is not None + assert mentorship.mentee is not None + assert mentorship.session_count == 2 + assert mentorship.include_feedback is True + logger.info("MentorshipSession initialization test passed") + except Exception as e: + logger.error(f"Failed to test MentorshipSession initialization: {e}") + raise + + +def test_mentorship_session_run(sample_task): + try: + mentor = create_function_agent("Mentor") + assert mentor is not None + mentee = create_function_agent("Mentee") + assert mentee is not None + mentorship = MentorshipSession( + mentor=mentor, + mentee=mentee, + 
session_count=2, + include_feedback=True, + output_type="str-all-except-first", + ) + assert mentorship is not None + result = mentorship.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("MentorshipSession run test passed") + except Exception as e: + logger.error(f"Failed to test MentorshipSession run: {e}") + raise + + +def test_mentorship_session_no_mentor(sample_task): + try: + mentee = create_function_agent("Mentee") + mentorship = MentorshipSession( + mentor=None, + mentee=mentee, + session_count=2, + include_feedback=True, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="Both mentor and mentee"): + mentorship.run(sample_task) + logger.info("MentorshipSession no mentor test passed") + except Exception as e: + logger.error(f"Failed to test MentorshipSession no mentor: {e}") + raise + + +def test_mentorship_session_no_mentee(sample_task): + try: + mentor = create_function_agent("Mentor") + mentorship = MentorshipSession( + mentor=mentor, + mentee=None, + session_count=2, + include_feedback=True, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="Both mentor and mentee"): + mentorship.run(sample_task) + logger.info("MentorshipSession no mentee test passed") + except Exception as e: + logger.error(f"Failed to test MentorshipSession no mentee: {e}") + raise + + +def test_negotiation_session_initialization(sample_two_agents): + try: + mediator = create_function_agent("Mediator") + assert mediator is not None + assert sample_two_agents is not None + negotiation = NegotiationSession( + parties=sample_two_agents, + mediator=mediator, + negotiation_rounds=3, + include_concessions=True, + output_type="str-all-except-first", + ) + assert negotiation is not None + assert len(negotiation.parties) == 2 + assert negotiation.mediator is not None + assert negotiation.negotiation_rounds == 3 + assert negotiation.include_concessions is True + 
logger.info("NegotiationSession initialization test passed") + except Exception as e: + logger.error(f"Failed to test NegotiationSession initialization: {e}") + raise + + +def test_negotiation_session_run(sample_two_agents, sample_task): + try: + mediator = create_function_agent("Mediator") + assert mediator is not None + assert sample_two_agents is not None + negotiation = NegotiationSession( + parties=sample_two_agents, + mediator=mediator, + negotiation_rounds=2, + include_concessions=True, + output_type="str-all-except-first", + ) + assert negotiation is not None + result = negotiation.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("NegotiationSession run test passed") + except Exception as e: + logger.error(f"Failed to test NegotiationSession run: {e}") + raise + + +def test_negotiation_session_insufficient_parties(sample_task): + try: + mediator = create_function_agent("Mediator") + single_party = [create_function_agent("Party1")] + negotiation = NegotiationSession( + parties=single_party, + mediator=mediator, + negotiation_rounds=2, + include_concessions=True, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="At least two parties"): + negotiation.run(sample_task) + logger.info("NegotiationSession insufficient parties test passed") + except Exception as e: + logger.error(f"Failed to test NegotiationSession insufficient parties: {e}") + raise + + +def test_negotiation_session_no_mediator(sample_two_agents, sample_task): + try: + negotiation = NegotiationSession( + parties=sample_two_agents, + mediator=None, + negotiation_rounds=2, + include_concessions=True, + output_type="str-all-except-first", + ) + with pytest.raises(ValueError, match="mediator agent is required"): + negotiation.run(sample_task) + logger.info("NegotiationSession no mediator test passed") + except Exception as e: + logger.error(f"Failed to test NegotiationSession no mediator: {e}") + raise 
+ + +def test_negotiation_session_without_concessions(sample_two_agents, sample_task): + try: + mediator = create_function_agent("Mediator") + assert mediator is not None + assert sample_two_agents is not None + negotiation = NegotiationSession( + parties=sample_two_agents, + mediator=mediator, + negotiation_rounds=2, + include_concessions=False, + output_type="str-all-except-first", + ) + assert negotiation is not None + result = negotiation.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("NegotiationSession without concessions test passed") + except Exception as e: + logger.error(f"Failed to test NegotiationSession without concessions: {e}") + raise + + +def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): + try: + assert sample_two_agents is not None + debate = OneOnOneDebate( + max_loops=5, + agents=sample_two_agents, + output_type="str-all-except-first", + ) + assert debate is not None + result = debate.run(sample_task) + assert result is not None + assert isinstance(result, str) + assert len(result) >= 0 + logger.info("OneOnOneDebate multiple loops test passed") + except Exception as e: + logger.error(f"Failed to test OneOnOneDebate multiple loops: {e}") + raise + + +def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): + try: + moderator = create_function_agent("Moderator") + assert moderator is not None + assert sample_three_agents is not None + output_types = ["str-all-except-first", "list", "dict", "str"] + assert output_types is not None + for output_type in output_types: + panel = ExpertPanelDiscussion( + max_rounds=1, + agents=sample_three_agents, + moderator=moderator, + output_type=output_type, + ) + assert panel is not None + result = panel.run(sample_task) + assert result is not None + if output_type == "list": + assert isinstance(result, list) + elif output_type == "dict": + assert isinstance(result, (dict, list)) + else: + assert 
isinstance(result, str) + logger.info("ExpertPanelDiscussion output types test passed") + except Exception as e: + logger.error(f"Failed to test ExpertPanelDiscussion output types: {e}") + raise \ No newline at end of file From 17e4de48874450200bdb45c9efa1e7562bb3c0b8 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 20:32:38 -0800 Subject: [PATCH 20/42] fixed auto swarm builder issues --- swarms/structs/auto_swarm_builder.py | 4 ++++ tests/structs/test_auto_swarms_builder.py | 21 ++++++++++++++------- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/swarms/structs/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py index 514cb79c..08c75164 100644 --- a/swarms/structs/auto_swarm_builder.py +++ b/swarms/structs/auto_swarm_builder.py @@ -407,6 +407,8 @@ class AutoSwarmBuilder: agents_dictionary = model.run(task) + agents_dictionary = json.loads(agents_dictionary) + return agents_dictionary except Exception as e: @@ -437,6 +439,8 @@ class AutoSwarmBuilder: f"Create the swarm spec for the following task: {task}" ) + swarm_spec = json.loads(swarm_spec) + print(swarm_spec) print(type(swarm_spec)) diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py index a1e9085a..547c032d 100644 --- a/tests/structs/test_auto_swarms_builder.py +++ b/tests/structs/test_auto_swarms_builder.py @@ -46,16 +46,22 @@ def test_agent_building(): print("Testing Agent Building") try: swarm = AutoSwarmBuilder() - agent = swarm.build_agent( + + # Create agent spec + agent_spec = AgentSpec( agent_name="TestAgent", - agent_description="A test agent", - agent_system_prompt="You are a test agent", + description="A test agent", + system_prompt="You are a test agent", max_loops=1, ) + # Create agent from spec + agents = swarm.create_agents_from_specs({"agents": [agent_spec]}) + agent = agents[0] + print("βœ“ Built agent with configuration:") print(f" - Name: {agent.agent_name}") - print(f" - Description: 
{agent.description}") + print(f" - Description: {agent.agent_description}") print(f" - Max loops: {agent.max_loops}") print("βœ“ Agent building test passed") return agent @@ -74,13 +80,14 @@ def test_agent_creation(): description="A swarm for research tasks", ) task = "Research the latest developments in quantum computing" - agents = swarm._create_agents(task) + agents_dict = swarm.create_agents(task) + agents = swarm.create_agents_from_specs(agents_dict) print("βœ“ Created agents for research task:") for i, agent in enumerate(agents, 1): print(f" Agent {i}:") print(f" - Name: {agent.agent_name}") - print(f" - Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f"βœ“ Created {len(agents)} agents successfully") return agents except Exception as e: @@ -103,7 +110,7 @@ def test_swarm_routing(): task = "Analyze the impact of AI on healthcare" print("Starting task routing...") - result = swarm.swarm_router(agents, task) + result = swarm.initialize_swarm_router(agents, task) print("βœ“ Task routed successfully") print( From c8e4cd68116d493fae76c116f40b3de9a2548334 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 20:55:46 -0800 Subject: [PATCH 21/42] fixed reasoning agent to not use sonnet --- swarms/agents/reasoning_agents.py | 2 +- swarms/agents/reasoning_duo.py | 2 +- tests/structs/test_reasoning_agent_router.py | 5 ++++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index 122ccb01..749002db 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -90,7 +90,7 @@ class ReasoningAgentRouter: majority_voting_prompt: Optional[str] = None, reasoning_model_name: Optional[ str - ] = "claude-3-5-sonnet-20240620", + ] = "gpt-4o", ): """ Initialize the ReasoningAgentRouter with the specified configuration. 
diff --git a/swarms/agents/reasoning_duo.py b/swarms/agents/reasoning_duo.py index c0ddc156..81fa0310 100644 --- a/swarms/agents/reasoning_duo.py +++ b/swarms/agents/reasoning_duo.py @@ -37,7 +37,7 @@ class ReasoningDuo: output_type: OutputType = "dict-all-except-first", reasoning_model_name: Optional[ str - ] = "claude-3-5-sonnet-20240620", + ] = "gpt-4o", max_loops: int = 1, *args, **kwargs, diff --git a/tests/structs/test_reasoning_agent_router.py b/tests/structs/test_reasoning_agent_router.py index cf5a8782..2507058c 100644 --- a/tests/structs/test_reasoning_agent_router.py +++ b/tests/structs/test_reasoning_agent_router.py @@ -6,6 +6,9 @@ from swarms.agents.reasoning_agents import ( ReasoningAgentInitializationError, ReasoningAgentRouter, ) +from dotenv import load_dotenv + +load_dotenv() def test_router_initialization(): @@ -55,7 +58,7 @@ def test_router_initialization(): eval=True, random_models_on=True, majority_voting_prompt="Custom voting prompt", - reasoning_model_name="claude-3-5-sonnet-20240620", + reasoning_model_name="gpt-4o", ) assert ( custom_router is not None From 9bec2f08f827e974aefcee57929e0d3390d3acc6 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 19 Nov 2025 21:06:53 -0800 Subject: [PATCH 22/42] [Improvement][aop auth handling + thank you to our users] --- README.md | 8 +++++ example.py | 3 +- examples/aop_examples/server.py | 2 +- swarms/structs/aop.py | 61 +++++++++++++++++++++++++++++++-- 4 files changed, 68 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 89e713ec..9aa87e03 100644 --- a/README.md +++ b/README.md @@ -831,6 +831,14 @@ Thank you for contributing to swarms. Your work is extremely appreciated and rec +### πŸ™ Thank You to Our Community + +We're incredibly grateful to everyone who supports Swarms! Your stars, forks, and contributions help make this project better every day. 
+ +[![Forkers repo roster for @kyegomez/swarms](https://reporoster.com/forks/kyegomez/swarms)](https://github.com/kyegomez/swarms/network/members) + +[![Stargazers repo roster for @kyegomez/swarms](https://reporoster.com/stars/kyegomez/swarms)](https://github.com/kyegomez/swarms/stargazers) + ----- ## Join the Swarms community πŸ‘ΎπŸ‘ΎπŸ‘Ύ diff --git a/example.py b/example.py index 42959ded..d13636db 100644 --- a/example.py +++ b/example.py @@ -1,4 +1,3 @@ - from swarms import Agent # Initialize the agent @@ -11,7 +10,7 @@ agent = Agent( dynamic_context_window=True, streaming_on=False, top_p=None, - stream=True, + # stream=True, ) out = agent.run( diff --git a/examples/aop_examples/server.py b/examples/aop_examples/server.py index 89420fed..adcaaa2c 100644 --- a/examples/aop_examples/server.py +++ b/examples/aop_examples/server.py @@ -92,7 +92,7 @@ financial_agent = Agent( ) # Basic usage - individual agent addition -deployer = AOP(server_name="MyAgentServer", verbose=True, port=5932) +deployer = AOP(server_name="MyAgentServer", verbose=True, port=5932, json_response=True, queue_enabled=False) agents = [ research_agent, diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index 17a58547..a8f7bea4 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -1,4 +1,5 @@ import asyncio +from contextlib import AbstractAsyncContextManager import socket import sys import threading @@ -7,11 +8,12 @@ import traceback from collections import deque from dataclasses import dataclass, field from enum import Enum -from typing import Any, Dict, List, Literal, Optional +from typing import Any, Callable, Dict, List, Literal, Optional from uuid import uuid4 from loguru import logger from mcp.server.fastmcp import FastMCP +from mcp.server.lowlevel.server import LifespanResultT from swarms.structs.agent import Agent from swarms.structs.omni_agent_types import AgentType @@ -19,6 +21,7 @@ from swarms.tools.mcp_client_tools import ( get_tools_for_multiple_mcp_servers, ) 
+from mcp.server.fastmcp import AuthSettings, TransportSecuritySettings class TaskStatus(Enum): """Status of a task in the queue.""" @@ -600,6 +603,9 @@ class AOP: log_level: Literal[ "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" ] = "INFO", + lifespan: Callable[[FastMCP[LifespanResultT]], AbstractAsyncContextManager[LifespanResultT]] | None = None, + auth: AuthSettings | None = None, + transport_security: TransportSecuritySettings | None = None, *args, **kwargs, ): @@ -670,6 +676,9 @@ class AOP: name=server_name, port=port, log_level=log_level, + lifespan=lifespan, + auth=auth, + transport_security=transport_security, *args, **kwargs, ) @@ -1122,6 +1131,28 @@ class AOP: Returns: Dict containing the result or task information """ + # Safety check: ensure queue is enabled + if not self.queue_enabled: + logger.error( + f"Queue execution attempted but queue is disabled for tool '{tool_name}'" + ) + return { + "result": "", + "success": False, + "error": "Queue system is disabled", + } + + # Safety check: ensure task queue exists + if tool_name not in self.task_queues: + logger.error( + f"Task queue not found for tool '{tool_name}'" + ) + return { + "result": "", + "success": False, + "error": f"Task queue not found for agent '{tool_name}'", + } + try: # Use config max_retries if not specified if max_retries is None: @@ -1176,6 +1207,30 @@ class AOP: Returns: Dict containing the task result """ + # Safety check: ensure queue is enabled + if not self.queue_enabled: + logger.error( + f"Task completion wait attempted but queue is disabled for tool '{tool_name}'" + ) + return { + "result": "", + "success": False, + "error": "Queue system is disabled", + "task_id": task_id, + } + + # Safety check: ensure task queue exists + if tool_name not in self.task_queues: + logger.error( + f"Task queue not found for tool '{tool_name}'" + ) + return { + "result": "", + "success": False, + "error": f"Task queue not found for agent '{tool_name}'", + "task_id": task_id, + } + start_time 
= time.time() while time.time() - start_time < timeout: @@ -1287,8 +1342,8 @@ class AOP: bool: True if agent was removed, False if not found """ if tool_name in self.agents: - # Stop and remove task queue if it exists - if tool_name in self.task_queues: + # Stop and remove task queue if it exists and queue is enabled + if self.queue_enabled and tool_name in self.task_queues: self.task_queues[tool_name].stop_workers() del self.task_queues[tool_name] From 59857a2d44a9dbba8cb42560ee73c9f118b1156c Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 21:08:03 -0800 Subject: [PATCH 23/42] addedf raise to hierarchical --- swarms/structs/hiearchical_swarm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 1501ccb6..873d56b1 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -914,6 +914,7 @@ class HierarchicalSwarm: logger.error( f"{error_msg}\n[TRACE] Traceback: {traceback.format_exc()}\n[BUG] If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues" ) + raise def agents_no_print(self): for agent in self.agents: From b7e681373ba76ef598f93892eddc08056fe624a0 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Wed, 19 Nov 2025 21:09:24 -0800 Subject: [PATCH 24/42] readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9aa87e03..11241094 100644 --- a/README.md +++ b/README.md @@ -823,7 +823,7 @@ We've made it easy to start contributing. Here's how you can help: 4. **Join the Discussion:** To participate in roadmap discussions and connect with other developers, join our community on [**Discord**](https://discord.gg/EamjgSaEQf). -### ✨ Our Valued Contributors +### ✨ Thank You to Our Contributors Thank you for contributing to swarms. Your work is extremely appreciated and recognized. 
From a59e8c8dc0b9f5bd8ef0bbbfba251c29440543d2 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 21:28:21 -0800 Subject: [PATCH 25/42] removed useless tests --- tests/structs/test_sequential_workflow.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index f905fe47..e4a48a20 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -3,12 +3,6 @@ import pytest from swarms import Agent, SequentialWorkflow -# Test SequentialWorkflow class -def test_sequential_workflow_initialization(): - # SequentialWorkflow requires agents, so expect ValueError - with pytest.raises(ValueError, match="Agents list cannot be None or empty"): - workflow = SequentialWorkflow() - def test_sequential_workflow_initialization_with_agents(): """Test SequentialWorkflow initialization with agents""" From aeb5044bf7abf6848a8617fc1e1e112ac50830f3 Mon Sep 17 00:00:00 2001 From: Steve-Dusty <66390533+Steve-Dusty@users.noreply.github.com> Date: Thu, 20 Nov 2025 19:18:11 -0800 Subject: [PATCH 26/42] added raise e --- swarms/structs/hiearchical_swarm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 873d56b1..49b63595 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -914,7 +914,7 @@ class HierarchicalSwarm: logger.error( f"{error_msg}\n[TRACE] Traceback: {traceback.format_exc()}\n[BUG] If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues" ) - raise + raise e def agents_no_print(self): for agent in self.agents: From fea72a12e1796913e613acddb0881698da5c1ee7 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 21 Nov 2025 14:40:10 -0800 Subject: [PATCH 27/42] [FEAT][rustworkx integration into GraphWorkflow] [New Examples] --- example.py | 4 +- examples/aop_examples/server.py | 8 
+- .../rustworkx_examples/01_basic_usage.py | 46 ++ .../02_backend_comparison.py | 56 ++ .../03_fan_out_fan_in_patterns.py | 73 ++ .../rustworkx_examples/04_complex_workflow.py | 101 +++ .../05_performance_benchmark.py | 104 +++ .../rustworkx_examples/06_error_handling.py | 55 ++ .../07_large_scale_workflow.py | 61 ++ .../08_parallel_chain_example.py | 73 ++ .../09_workflow_validation.py | 79 +++ .../10_real_world_scenario.py | 122 ++++ ...n_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png | Bin 0 -> 28198 bytes .../rustworkx_examples/README.md | 156 +++++ .../test_graph_workflow_rustworkx.py | 632 +++++++++++++++++ .../multi_agent/swarm_router/swarm_router.py | 1 - swarms/structs/aop.py | 11 +- swarms/structs/graph_workflow.py | 654 +++++++++++++++++- tests/structs/test_custom_agent.py | 144 +++- tests/structs/test_deep_discussion.py | 77 ++- tests/structs/test_graph_workflow.py | 552 +++++++++++++++ .../test_graph_workflow_comprehensive.py | 225 ------ tests/structs/test_multi_agent_debate.py | 312 ++++++--- 23 files changed, 3134 insertions(+), 412 deletions(-) create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py create mode 100644 
examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py create mode 100644 tests/structs/test_graph_workflow.py delete mode 100644 tests/structs/test_graph_workflow_comprehensive.py diff --git a/example.py b/example.py index d13636db..386b6597 100644 --- a/example.py +++ b/example.py @@ -10,7 +10,6 @@ agent = Agent( dynamic_context_window=True, streaming_on=False, top_p=None, - # stream=True, ) out = agent.run( @@ -18,5 +17,4 @@ out = agent.run( n=1, ) -for token in out: - print(token, end="", flush=True) +print(out) diff --git a/examples/aop_examples/server.py b/examples/aop_examples/server.py index adcaaa2c..b91bcbaa 100644 --- a/examples/aop_examples/server.py +++ b/examples/aop_examples/server.py @@ -92,7 +92,13 @@ financial_agent = Agent( ) # Basic usage - individual agent addition -deployer = AOP(server_name="MyAgentServer", verbose=True, port=5932, json_response=True, queue_enabled=False) +deployer = AOP( + server_name="MyAgentServer", + verbose=True, + port=5932, + json_response=True, + queue_enabled=False, +) agents = [ research_agent, diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py new file mode 100644 index 00000000..a9d0a344 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py @@ -0,0 +1,46 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent 
import Agent + +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in comprehensive research and data gathering", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Rustworkx-Basic-Workflow", + description="Basic workflow using rustworkx backend for faster graph operations", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_node(strategy_agent) + +workflow.add_edge(research_agent, analysis_agent) +workflow.add_edge(analysis_agent, strategy_agent) + +task = "Conduct a research analysis on water stocks and ETFs" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py new file mode 100644 index 00000000..35cfe83e --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py @@ -0,0 +1,56 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agents = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(5) +] + +nx_workflow = GraphWorkflow( + name="NetworkX-Workflow", + backend="networkx", + verbose=False, +) + +for agent in agents: + nx_workflow.add_node(agent) + 
+for i in range(len(agents) - 1): + nx_workflow.add_edge(agents[i], agents[i + 1]) + +nx_start = time.time() +nx_workflow.compile() +nx_compile_time = time.time() - nx_start + +rx_workflow = GraphWorkflow( + name="Rustworkx-Workflow", + backend="rustworkx", + verbose=False, +) + +for agent in agents: + rx_workflow.add_node(agent) + +for i in range(len(agents) - 1): + rx_workflow.add_edge(agents[i], agents[i + 1]) + +rx_start = time.time() +rx_workflow.compile() +rx_compile_time = time.time() - rx_start + +speedup = ( + nx_compile_time / rx_compile_time if rx_compile_time > 0 else 0 +) +print(f"NetworkX compile time: {nx_compile_time:.4f}s") +print(f"Rustworkx compile time: {rx_compile_time:.4f}s") +print(f"Speedup: {speedup:.2f}x") +print( + f"Identical layers: {nx_workflow._sorted_layers == rx_workflow._sorted_layers}" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py new file mode 100644 index 00000000..8be4fecf --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py @@ -0,0 +1,73 @@ +from swarms import Agent, GraphWorkflow + +coordinator = Agent( + agent_name="Coordinator", + agent_description="Coordinates and distributes tasks", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +tech_analyst = Agent( + agent_name="Tech-Analyst", + agent_description="Technical analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Fundamental analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +sentiment_analyst = Agent( + agent_name="Sentiment-Analyst", + agent_description="Sentiment analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +synthesis_agent = Agent( + 
agent_name="Synthesis-Agent", + agent_description="Synthesizes multiple analyses into final report", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Fan-Out-Fan-In-Workflow", + description="Demonstrates parallel processing patterns with rustworkx", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(coordinator) +workflow.add_node(tech_analyst) +workflow.add_node(fundamental_analyst) +workflow.add_node(sentiment_analyst) +workflow.add_node(synthesis_agent) + +workflow.add_edges_from_source( + coordinator, + [tech_analyst, fundamental_analyst, sentiment_analyst], +) + +workflow.add_edges_to_target( + [tech_analyst, fundamental_analyst, sentiment_analyst], + synthesis_agent, +) + +task = "Analyze Tesla stock from technical, fundamental, and sentiment perspectives" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") + + +workflow.visualize(view=True) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py new file mode 100644 index 00000000..4f025a71 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py @@ -0,0 +1,101 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +data_collector_1 = Agent( + agent_name="Data-Collector-1", + agent_description="Collects market data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + agent_description="Collects financial data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Performs technical analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + 
agent_name="Fundamental-Analyst", + agent_description="Performs fundamental analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +risk_analyst = Agent( + agent_name="Risk-Analyst", + agent_description="Performs risk analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_consultant = Agent( + agent_name="Strategy-Consultant", + agent_description="Develops strategic recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +report_writer = Agent( + agent_name="Report-Writer", + agent_description="Writes comprehensive reports", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Complex-Multi-Layer-Workflow", + description="Complex workflow with multiple layers and parallel processing", + backend="rustworkx", + verbose=False, +) + +all_agents = [ + data_collector_1, + data_collector_2, + technical_analyst, + fundamental_analyst, + risk_analyst, + strategy_consultant, + report_writer, +] + +for agent in all_agents: + workflow.add_node(agent) + +workflow.add_parallel_chain( + [data_collector_1, data_collector_2], + [technical_analyst, fundamental_analyst, risk_analyst], +) + +workflow.add_edges_to_target( + [technical_analyst, fundamental_analyst, risk_analyst], + strategy_consultant, +) + +workflow.add_edges_to_target( + [technical_analyst, fundamental_analyst, risk_analyst], + report_writer, +) + +workflow.add_edge(strategy_consultant, report_writer) + +task = "Conduct a comprehensive analysis of the renewable energy sector including market trends, financial health, and risk assessment" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py new file mode 100644 index 00000000..2b5251f7 --- 
/dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py @@ -0,0 +1,104 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agents_small = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(5) +] + +agents_medium = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(20) +] + +nx_workflow_small = GraphWorkflow( + name="NetworkX-Small", + backend="networkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_small: + nx_workflow_small.add_node(agent) + +for i in range(len(agents_small) - 1): + nx_workflow_small.add_edge(agents_small[i], agents_small[i + 1]) + +nx_start = time.time() +nx_workflow_small.compile() +nx_small_time = time.time() - nx_start + +rx_workflow_small = GraphWorkflow( + name="Rustworkx-Small", + backend="rustworkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_small: + rx_workflow_small.add_node(agent) + +for i in range(len(agents_small) - 1): + rx_workflow_small.add_edge(agents_small[i], agents_small[i + 1]) + +rx_start = time.time() +rx_workflow_small.compile() +rx_small_time = time.time() - rx_start + +nx_workflow_medium = GraphWorkflow( + name="NetworkX-Medium", + backend="networkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_medium: + nx_workflow_medium.add_node(agent) + +for i in range(len(agents_medium) - 1): + nx_workflow_medium.add_edge( + agents_medium[i], agents_medium[i + 1] + ) + +nx_start = time.time() +nx_workflow_medium.compile() +nx_medium_time = time.time() - nx_start + +rx_workflow_medium = GraphWorkflow( + name="Rustworkx-Medium", + backend="rustworkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_medium: + 
rx_workflow_medium.add_node(agent) + +for i in range(len(agents_medium) - 1): + rx_workflow_medium.add_edge( + agents_medium[i], agents_medium[i + 1] + ) + +rx_start = time.time() +rx_workflow_medium.compile() +rx_medium_time = time.time() - rx_start + +print( + f"Small (5 agents) - NetworkX: {nx_small_time:.4f}s, Rustworkx: {rx_small_time:.4f}s, Speedup: {nx_small_time/rx_small_time if rx_small_time > 0 else 0:.2f}x" +) +print( + f"Medium (20 agents) - NetworkX: {nx_medium_time:.4f}s, Rustworkx: {rx_medium_time:.4f}s, Speedup: {nx_medium_time/rx_medium_time if rx_medium_time > 0 else 0:.2f}x" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py new file mode 100644 index 00000000..3fd9f25c --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py @@ -0,0 +1,55 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +test_agent = Agent( + agent_name="Test-Agent", + agent_description="Test agent for error handling", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow_rx = GraphWorkflow( + name="Rustworkx-Workflow", + backend="rustworkx", + verbose=False, +) +workflow_rx.add_node(test_agent) + +workflow_nx = GraphWorkflow( + name="NetworkX-Workflow", + backend="networkx", + verbose=False, +) +workflow_nx.add_node(test_agent) + +workflow_default = GraphWorkflow( + name="Default-Workflow", + verbose=False, +) +workflow_default.add_node(test_agent) + +workflow_invalid = GraphWorkflow( + name="Invalid-Workflow", + backend="invalid_backend", + verbose=False, +) +workflow_invalid.add_node(test_agent) + +print( + f"Rustworkx backend: {type(workflow_rx.graph_backend).__name__}" +) +print(f"NetworkX backend: {type(workflow_nx.graph_backend).__name__}") +print( + f"Default backend: {type(workflow_default.graph_backend).__name__}" +) 
+print( + f"Invalid backend fallback: {type(workflow_invalid.graph_backend).__name__}" +) + +try: + import rustworkx as rx + + print("Rustworkx available: True") +except ImportError: + print("Rustworkx available: False") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py new file mode 100644 index 00000000..edaeef0c --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py @@ -0,0 +1,61 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +NUM_AGENTS = 30 + +agents = [ + Agent( + agent_name=f"Agent-{i:02d}", + agent_description=f"Agent number {i} in large-scale workflow", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(NUM_AGENTS) +] + +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + description=f"Large-scale workflow with {NUM_AGENTS} agents using rustworkx", + backend="rustworkx", + verbose=False, +) + +start_time = time.time() +for agent in agents: + workflow.add_node(agent) +add_nodes_time = time.time() - start_time + +start_time = time.time() +for i in range(9): + workflow.add_edge(agents[i], agents[i + 1]) + +workflow.add_edges_from_source( + agents[5], + agents[10:20], +) + +workflow.add_edges_to_target( + agents[10:20], + agents[20], +) + +for i in range(20, 29): + workflow.add_edge(agents[i], agents[i + 1]) + +add_edges_time = time.time() - start_time + +start_time = time.time() +workflow.compile() +compile_time = time.time() - start_time + +print( + f"Agents: {len(workflow.nodes)}, Edges: {len(workflow.edges)}, Layers: {len(workflow._sorted_layers)}" +) +print( + f"Node addition: {add_nodes_time:.4f}s, Edge addition: {add_edges_time:.4f}s, Compilation: {compile_time:.4f}s" +) +print( + f"Total setup: {add_nodes_time + add_edges_time + compile_time:.4f}s" +) 
diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py new file mode 100644 index 00000000..21b18d23 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py @@ -0,0 +1,73 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +data_collector_1 = Agent( + agent_name="Data-Collector-1", + agent_description="Collects market data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + agent_description="Collects financial data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_3 = Agent( + agent_name="Data-Collector-3", + agent_description="Collects news data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Performs technical analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Performs fundamental analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +sentiment_analyst = Agent( + agent_name="Sentiment-Analyst", + agent_description="Performs sentiment analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Parallel-Chain-Workflow", + description="Demonstrates parallel chain pattern with rustworkx", + backend="rustworkx", + verbose=False, +) + +sources = [data_collector_1, data_collector_2, data_collector_3] +targets = [technical_analyst, fundamental_analyst, sentiment_analyst] + +for agent in sources + targets: + workflow.add_node(agent) + +workflow.add_parallel_chain(sources, targets) + +workflow.compile() + +task = "Analyze the technology 
sector using multiple data sources and analysis methods" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py new file mode 100644 index 00000000..79c2de3d --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py @@ -0,0 +1,79 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agent_a = Agent( + agent_name="Agent-A", + agent_description="Agent A", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_b = Agent( + agent_name="Agent-B", + agent_description="Agent B", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_c = Agent( + agent_name="Agent-C", + agent_description="Agent C", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_isolated = Agent( + agent_name="Agent-Isolated", + agent_description="Isolated agent with no connections", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Validation-Workflow", + description="Workflow for validation testing", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(agent_a) +workflow.add_node(agent_b) +workflow.add_node(agent_c) +workflow.add_node(agent_isolated) + +workflow.add_edge(agent_a, agent_b) +workflow.add_edge(agent_b, agent_c) + +validation_result = workflow.validate(auto_fix=False) +print(f"Valid: {validation_result['is_valid']}") +print(f"Warnings: {len(validation_result['warnings'])}") +print(f"Errors: {len(validation_result['errors'])}") + +validation_result_fixed = workflow.validate(auto_fix=True) +print( + f"After auto-fix - Valid: {validation_result_fixed['is_valid']}" +) +print(f"Fixed: {len(validation_result_fixed['fixed'])}") 
+print(f"Entry points: {workflow.entry_points}") +print(f"End points: {workflow.end_points}") + +workflow_cycle = GraphWorkflow( + name="Cycle-Test-Workflow", + backend="rustworkx", + verbose=False, +) + +workflow_cycle.add_node(agent_a) +workflow_cycle.add_node(agent_b) +workflow_cycle.add_node(agent_c) + +workflow_cycle.add_edge(agent_a, agent_b) +workflow_cycle.add_edge(agent_b, agent_c) +workflow_cycle.add_edge(agent_c, agent_a) + +cycle_validation = workflow_cycle.validate(auto_fix=False) +print(f"Cycles detected: {len(cycle_validation.get('cycles', []))}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py new file mode 100644 index 00000000..cc6e83ff --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py @@ -0,0 +1,122 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +market_researcher = Agent( + agent_name="Market-Researcher", + agent_description="Conducts comprehensive market research and data collection", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +competitor_analyst = Agent( + agent_name="Competitor-Analyst", + agent_description="Analyzes competitor landscape and positioning", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +market_analyst = Agent( + agent_name="Market-Analyst", + agent_description="Analyzes market trends and opportunities", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +financial_analyst = Agent( + agent_name="Financial-Analyst", + agent_description="Analyzes financial metrics and projections", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +risk_analyst = Agent( + agent_name="Risk-Analyst", + agent_description="Assesses market risks and challenges", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + 
+strategy_consultant = Agent( + agent_name="Strategy-Consultant", + agent_description="Develops strategic recommendations based on all analyses", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +report_writer = Agent( + agent_name="Report-Writer", + agent_description="Compiles comprehensive market research report", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +executive_summary_writer = Agent( + agent_name="Executive-Summary-Writer", + agent_description="Creates executive summary for leadership", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Market-Research-Workflow", + description="Real-world market research workflow using rustworkx backend", + backend="rustworkx", + verbose=False, +) + +all_agents = [ + market_researcher, + competitor_analyst, + market_analyst, + financial_analyst, + risk_analyst, + strategy_consultant, + report_writer, + executive_summary_writer, +] + +for agent in all_agents: + workflow.add_node(agent) + +workflow.add_parallel_chain( + [market_researcher, competitor_analyst], + [market_analyst, financial_analyst, risk_analyst], +) + +workflow.add_edges_to_target( + [market_analyst, financial_analyst, risk_analyst], + strategy_consultant, +) + +workflow.add_edges_from_source( + strategy_consultant, + [report_writer, executive_summary_writer], +) + +workflow.add_edges_to_target( + [market_analyst, financial_analyst, risk_analyst], + report_writer, +) + +task = """ +Conduct a comprehensive market research analysis on the electric vehicle (EV) industry: +1. Research current market size, growth trends, and key players +2. Analyze competitor landscape and market positioning +3. Assess financial opportunities and investment potential +4. Evaluate risks and challenges in the EV market +5. Develop strategic recommendations +6. 
Create detailed report and executive summary +""" + +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png new file mode 100644 index 0000000000000000000000000000000000000000..d45a9a2d1512dffddda5f01c187b8136725a543e GIT binary patch literal 28198 zcmb@uWmJ`G)Hb>R=~U?yR1j&9kd#gVY3UG=knR$sTckq~=~6(tk(5>h>F!24J#+2v z{mvNg80Y61d#|w&p7pFdW?XYV*L8=fD$C;IP~spE2z+@tDK!KFB@bR=SQzk|g$cYW zc*8JNl$ApKL;m-vF)t2*xPy?F64!7~-N1&!3;& zEbDk1DsxNmme=L|_wD&yRs4x#rl!Bg%s7mtw?vM=U_HdicebqiFnw_?K2}rO!eH=m z@Ns(Ez*I`Dy6eXeDMBanoSo!Y^nveE7m2Ftte?P-#4@d6L=tjQlK(%wVYfun2n|)+ z8R_Wg__fVHk(I^V+1UvV4b3kXOioTV-e2hGh^Bk=ib&tUAOg>*;?+$7Y61yK$%x3! zks`8*3QP4bo+Dq4=y1u{SzWzNO&QYD(_7~4jm^x^e0+S0^=r%Zds)Atpkctxi7%K) zBZ`XIt^a%pDk@^B(PxKy!F~6alZ+PDhx5!oYRkxA@V%bUFilA<-TC&iA2p;++~Yhx zck#oTh8ud}m%aMQ5Vd%CcvzTCATBPBMM&5ch<%&Wea{$HWp3$5-09}mX8VP9 zZ2dZC?5e6N-ND_WuT5`tSUXoU-WZzJ!uXe^yMzjcTh|ZQhshs1-Xmq#4=pX_9NFuP zqH3F+rStUkJcWtxjc6N*8yPiITBrUqNB3jV{_@Fgwh4g^R zK20_5iDxDXlPWwrJv%e`cYQ@J>Mg*^$~sh_&J5q#q@Rj{F0vcc-2f9hJwHc95aXaHau^X1 z5)%F{(k6U8QNipusWyC0atxdB`SoN~jmst}1_3pgEtatFCAp}m=opxE?B5(V{s#}R z4ps-XeMloZUJd6)O>kzahJuOBP1QOf{LFh3Tf4gQrmFKRrro+?=(@7yQ&sbk(-gsE7)?S=xHnZ8esY#Tc;#*nd=n z(nnzmi@}c_9T8;ni;DqEOG}jXJ3)tEz#useK25c{qt8?^= zK`*%KWR1gJv&b^=s?k~}i_!dNcdHy0Wk_$|ZUZM7vunP}J#u7HN^TJr7S>o+Nf&@S zt@NiA1=EOnC$F!ql@=GXc9NU%SPZ-%B_-Y3o~))k8D`4x{@&2g5I!F9@2BhR_~hg_ z0vf?m%_6OWjb``#1=CfQq~4V1Xo8EK`rUB0xcK;XM1Ga#{5IT3GK9d~pw8JOlFRk- z)M2zxlV!BQ!wEU0WB6Li+MP(QW0)-$La9I_(OT5G!6Ko?N7Np3)5hNOUl&P|^vX!}RmCcVA+$mPc5fQjl!tSLT2ZfqN z$+59FdwYA$BDqG(%@sAT_-&`UPrXHNM|8mYQu5h!!;q2N14g5yt)0}LA!bzau`L+i zY;<(A(45pztTTC|x6W`DpG_s*zQ<=Hdy~lkV8NcnC##c~}oV 
zpL6GxrKNU=t-XDHFW?uJzwV95%Tr}||HWS1ckk=#EAF0!4WKYHpLC=1IgFiXvYxDB z+nj0iD(ii7e#nfb`eG&sX@`TN7m-|Ap7sxQzP7ZqP&V#FS`sMl^3Vy+Y*qxg99Pj^ zoVY%p*OLSvcw1jDG&MavKR=HGUTQPb@SL|vJx_Vg@qj<=$(@#~ixV#2OLxvrWW1J= zl6q30A@P8?u-|R0)Wm8K7Xy#n@ATCDg}He_u*=RAMO|H;Rd0AqjJD6tL?z2ilTXCO z$?n~|chTH-ro>Nnrcn^nbuIxAY@?|7_ZB*E?=dnK?ljNVI$?s#cC+OU4XI{lXWwCG zS6)0P5%4-@4JM?Eh6or*#s4Lq?Sk%g;$)?@_THIy`eP>yg#Ah%g;Kh}P59Ot=k;5N zxv?^{wdwlm(Oz@}4-ZcyjZjs$QES^#`BwcGgZjeldidJA{QT8p=48}*IU z>0%l7w9FFx(|Y3f4&mJ%0O(_)%*$rTZEVU+28vRFdzV-A>6Mo9}7?`$DWb>ULPsa zws&$GuJJ_>k&vMC^Yc>+*dws9vF)9mS=D52zfPLWQ6x(j^`Z2+IPN&w{8RR_pAvb= z>R^T}M4Y|TeKSaAvPI^kuZBMPPxyF0g|N9>MT++KBm&Y2qVIzcnb_4~oK<>K5*cJF zLRh`ZIAarrSS)5{=E5s&tqwM_{A-s#OFk=-U0%~#76G>r6|W~rLkMWzN-;nbL_y@! zj-g6_E%duIsz+B-Q=8k}C3JIhE7q$f@b&dAEh`gm_Pu%+8=J{D4G7@3sKv{ds0hFu z8f_JpVI#1qQ#B4~f`Wp32M6j!DiEdZ7d!C*+vG>_@bhDu;f;@vbMo_RC%m~j=SAf6 zhWNPoq63^Mb6i%asjW4EPZ!P^P`*y0A}0@B?oC>Rec|Nd^3Tg-s&ig{yVw=eHZnpo zFfahy`o}6|;$d$Si)HiS#t81qmoEzk(KpAzDWzm&OlKNAqmT->9v3hC1LXk+AE?=rcF zXT?Mf1S@i$mwkRIDHOp3G=-Fgs@2~t22F=Pi5Jd8f(nX_jn(!0-QL!Q*4f#q)8zg5 zZ9qVoSrO<@Lsg{-}ko&PGt6od44t!{xw;zI~z5LtD!O6>860b*XKJIpZe}Dhk zmFb&5fQyEj-@GX{Y^>|9=YRhEdGcc?Mu>x95Y^BSV9Od}Iyf7*sHpUox)fVSWE^@& zWX!*MJx_Rq3?yF!FZ!^%?H1~+|F9#G1x#1{Ds{Zf3=<4F1OQ)KXJ_u19tQw5w4~mS zo#}e|Sh;uU>5&}Gm$x7{t6RUvMF_d=v_nFVO-Tvs?v}eep0rCA_3=#EoPtpcyYC@> z{`^^ZvAY``5fRZ=qF4QU=H%7S&*-SAsGKev8v3=4=xPPTn1rKO+ zcfX@w8~?_XtCaHDm4Pj~s@KKURR%y$na_o*^ZGCJC%(g0v@v=6*B3A*WIUG3Q7hRf zk$-+XeX%y2N0`WN5RQGD(HKk$8G$}$WtExtl2+J^-+FSro%NPZicPPoz0PIR{%Aw9 zy{pUYdmy%SY6KMp#r)4K*&6$KiQ3ek+Q)U;_x@WXnsmfmoQi1c_0@%miHScTginCV zkSHsfPAuB4=?tT+tZZ$pEUVL!hK6Pz04~ySkfs|KcN-56uk^)_TL6u8O3Kl0{rmTK$;nt=US3gy^o0Q0WfT-}D0!_k zm=g6;e;pf0$13E=DJl-1BCZX-8aBQS3zLrx#zrSM>+b1M^O8AeDzus|x=piNIZBVC ztT{J5Cxu0ytr4ZSZ9x=}R&JF>D;>u=@rc;Tcb-gMZY=V#gDt=oH$ES*UPdEOS3py;^(5<$` zM?*scG;F%ulc1YJZB*yXEag~m#t#tkc? 
zXPDCfSGl;jPQfciX7U_dT-aPDz}V-&Z1;|jJHZ4WW?!WK9pZDTty0bqjq>wDAhThS z_Sa6ZL@w}bnw+JP0s!O6lw56}Y{KK}mw`%u_VE9d*9 z`{~|%AznqGM(V@Gy_Co-y=9dz^4gI) zK{|$3Lx!Fr@W{#r@Ky7?tq8w6bX?#3!C2M9-Z+{9;h=-RKkF~Wm5&~7ZZtL>B=q9Rk z-2C&^0-W!KjEqdN%jW3TTx)>UB7~4WD1|$rPU!}mp8|Ez1+19nj|Hf1+LfL@)%>yb z{{8#0O6xy55E4@W(m6Rgnr6JYu6-D$XZb!ZjsoGj@te7%8rv)q{l<;bs@IcNQ;9Gx zHiJ5igtZv1PFNx&G*7OtueX|RYisMR*qKN)DL&lp)A1eE784VDX=2hUdUdGodc5_# zGkN9p6r|(I$a0cO(dBWsr2xs+0(L0%Zg2j|!KH$Vl81}S1fPoU1=NY`ys9)pu8}Cl zC%dy205ePU_%+{o{Q-DGo-t?v7rgX3-ZFu5!8kZL*lNA)&DD`MfJq&Dd(O#fJGwMs z5AH>K0V}4;U%$M_lnp+nruIPfrt{0-NjHW@Mvl>A9rlgr@8cnGik1@trs z^#4IeXaz)shd*=>#KpxuKKf&1U5VFOp-BJbsg={~o>$*KAOSD3(g5`E+}&LOHh>Pf z{S*`wl*4V!X*~^}M^^zPEl>QT+(*{iqUPECVXLb=5tEuY8V7bRba?lLftBSybjsJyhc?q!4;#>U2mJgL_lFy|kE zr{3NTTFm3PuhJy+(_A=8BKantvnTP0W(*=7V8p#lr;nPNTEjnU?{%YFfVy;vipL4f z+1&5xN82u%hK96Iw?@Y?shI9e)t*BBbbx4&3|vr?hDb5|6qttPzj^bfze!tgK`=h$ zBdqiDbDe724_0Fzgxo%N7sFqQb=K8|08)ydk!| znr&*F*n_1F?J#Zc?6gX^oo(`g4nQ)$9W8i^iIo+OtgLMJvZKv-Il5ylHG=5&Z8Y|z zqQrR;1gx*dzpkz>Gz55CzQUtbsFcUQSrkQKDk$$7Agd+tL<}hY1A~GxA>_OoEvA6q zXq^Y6Xc#Er(}_|;^8|qq5fQO-FWthnpXpqV){@WQ*y*fWaS+YPQ#R-zW)~&S4d8jvP)DJG$9)L-> z2bRgITY&{ll18r+83_rAf??8hV*`Et-@!tHg2;A8Q0IC>W23{t%G0P_i7+V!cMlKx zJ9iL%o)353EEVX#^w%Y6$fRxCn3|#@x_|t5md3}zqIeXLb&sU>zgW-)u^`$5YBk-8 zSN^ka{u#Umd7$_JQ!}fGaa9$Bf=vuz~W*P6N3RyA~p&d zW^-QS?3WL{6ykq9K}=16sr}VZGp!2RPUJ$atcau6`a1vPKM2hONe;D+c!m4cVDH~5 zE6txhd&bJf7S;bjC~19j^Bb6pb^6M`>)n6kA#En$=#gs9(wI1u; zy8&!QP3imY!pG2+WSG>scZ_h9bRVV7DiM1<$WA=%W;a3B;6_(b3WWO&3-a5=>+z6qmeDwX{fa@$t=} zxWYp~^TWi*dmsHawEVay(!G%lAE-DX1w!f~y?L|M+s8-qpt8DJ+Y7~Yp*;-mNvxYc z&I4kc|C%xoJi!ckczMkM%XZlHy@8eqpVNxcXp@guZc$M$f)iYu59}}z3Oy}85s?r0 zw8F`-x1IC$$q5N07uOfNBRv`#_P<^VK>=6}MHX@^5S;!`S|Hwjp)_)*A|p_SyqU8t zq?SjW9Z%t+;GSA3op%PQBb~ zBTVM8AaEC^g&>D?6=<8^f>QJA=y{1zGcDM4ZnPqH!T1A#*zju84xGL;enN<-fU@OO zRJvf-%OJ*Xe4&Jt2VqxtdgM-EeB|5P1=I>Tx1a)+fQ%FJXmeRU&H@m{w5juyl5FO1T~lUH$ueJ8WG4$2SBoH@@iR#IDH4-v1#U z{(u4}=hoBy)H^K&OqW|ik|BCkhRj%JEvo8{YK^yM@0o5l9@QlI(%G&&owuGu>1S;) 
z&S%NrDb%E>m57#2DXV$!h+eI>+tN9NVZ=ujMXEJ3V!$g=h>OmoCK9g{)|^#Mzp#!X z@Z0H0=p<21vS5DR$?1XYz4o=+#hHXh;iZ=G%G$KE9EI?t4ddT*j8Yc$7yj+9rzTdZNGc*U1-rt zV1v(rtA4FJdTniO^1&i?cXqjBzY2TYT_&cysiTF1Q}S6;R<$6@G0Miqse`-Od|(+5 zKipG!`;@S{Ww7bCaKA`68&g~l5hZarg$WNhh_SU@M8^Ty;-V{^w!EKU!#_^j**g3J zsc_s?c4$j|)j!yrY~-|`F2akR5eb;{`C311*h8&UwjPw{en1ME@$${oiN)& z?a*yo&$QOA^G(1~_KWmdPmFJ5eVzZB{?RcEN=ayIlQs8U=g4`~>Kkq~C{St(tPx+^ zSel33PEvkitW$yQ=5UbEyY%4llyBW!t#=SPl7X*|U+=Na*cnBQ$sx;8 zBUEEH{@uK=MRlqsDU9Q1nYnD=r7a`nbH4A1M|$@AQ_{}XfAGJIej%T*drx+d=SS@;@rK34BJM zb>~(%a%CeA@gfN*qMG?H4OZ$bzt7uq*8F%fC0`#=vt_Y7$rbq99=U3DryF!jWx~$o zn>wbovV?TvthcgK+`cg^Qr13_P(R5<`2CL8z@y^C`|?!ZVRN6Yc&34pO3SWgmg<__ zAzdQ#$yA)ci^c&7DUBNqdY)!|dt6x9_~OnIkLr%Lg7yB`p&;UM?sNa*X(11Qh5ME_ zxHq?hmfqw^x%wreEr^HA{`h0J6ecJ92cSER;7L~?mwP5;@m+zhD6bAS(! z^STn>2u6*h5W^r%B<)~7Nwb{&vTIr^27}VJERq@@o4TzzvXg`Xlx}Qj&U6loPuc@Skmzv58kHdI&Um8cw|5#*_ z?ra=zkAlTUK^Wd^cv3W4@+s@UDCE3kvQkFDQ6X#*XKZ4*n%JB6gt}kv5oS`_KN1)o z3{>6bij_*I@R|(kMt5&-0N5FUG_^$i%k90`Nm-pTcXX&z;uXwe*L&5ff7PV$N~b=c zNqMhS(sNsl3PMuCO35W#`MvS`d^8-!y0gUWQ3m$LyZ#J|FaHTpsWT~~o8jRXQ_anM z@UlXgJC2%sHI?lEgV}RGYxLoFv<$p(+bv4)p+2Y?J9kj6@XBes8B%6-_VBWHUqy;Z z#>}hTH8~}X#F8B4ladR|)7E3}vy}v`W$c0|NG}QpGRGJQ<6>=A&FUwas&~%CA2ATheE*te1Y_A8DnR?Y zb z43jopTi-00E@x4k4(HkVEv00`MN22-?6d1Zq!vK)nT1L1?mE?4(u7t2MCW;9CyCa_ zmKzAbuN~2gxZzcztcufE1x0Hq|I=jp|9Yd8dPgXs>`LHkbS{)Tm=Yc2L|Q2L(kBUQ zHTO!m%gMFD?P}m=MfEk_JcY{2D-py+@zv9I^Ak38dKVi zC-+DTuH^4cEC+hd3M-vsoDa>f&#h%89u;)7bj zZlo5S``sfMXI0m+-2OYEy02=Qk=#F`t=VsM2xD85XpB+(Qn#)d90OU24a##%S*u3t zq^~C3rSw*91i^F~M=uOp>T~$(Fr7KxXUU&xh0YjwE$;cZjg?#ArV8p0JMz@J6;}SC`B?WD?=95J&(5zJ`39rRA_f;fF7}9Nm%481jyMWB@Awt{)9)C% zDOvR53j2)cXTt4*kr!$d3mF}41Fg!h-94=YZ!O*)x#l)1%1a)K&ytUGb7I5CqC_|; zI!Vz)qawD(n=l*C4<#Bs|GC&&TiX?F7{w6%^*?dv@B`bflGe5WhwDC%^v50;umK`X^S zr>OKnVt;Q9W8gbCh+~z8@UNSwtg@0>n)S>~ zK8GVOR`G72Y>+i|7ujB!EAI5b-E4-Tg#YD=wwtk(%hL@?ioVZL=c!C$xtn~O)<(bS z@%~meczz(t)oFh)@ibl3mvSKL!%w>>Vh|Ia(LRuPgxuJyhO{Kh1EaM4xtRb4kCY`x 
z58glAM)0YAtTg{pd6Oajp|07@e=_#}jG^Z0L6Z)}fK7dfzdAcB8fmF*sBV}$ce9n8 zSUA6ZeTnO+CsB@>PC13*TfAZKEl`bM#o)#DSf0{W7|;~RC}CDkPKmG}ma zV-?Ik+o95igeW;Mraz@j&_&MlwyesX`|HiLlzn~uM7Jp40#u5;@@D0;nf2R2nZ9{) zkHFv8Ux3&Fi{?s@H0^7_o~DkK!oy&E@?Z`o-!LQX>u{<+w#ExRva-$5-ExD7 zncBa-KL-4DSlQp7V0RBT>8Z{7c%FMgzHj4SY-%l7%-dVsqWf86X*S~i`rt`pV#B~J z>G{k1cB&FqTu>VPT61(U(HPn`R(d^n zbb54Rbg)@uPMd#0K{)qvl=xiu>aS+-JIct9uzw>nRWyc8K0)pJ{m)FuWx*WVt;T!Go$hc+Wlv8_AJ{3-mirkv_-H0X#oCN zHs8Sx@gxwSd{f-+xVvKhD_D;8ZHp@6>x$u=W37PGU%bz|vXbm9wJ9Dx-_GrAmh#Ux ze->c^qN^&NMTO4T8s^VXVRGzdTi?>s3O~rF%5*~b2m!ZqYT7Pgt^OkWnY;swfxGA8 z6`jk1ypG?mMJ~$h{)i%;Y-Q5saC{3)1m5Qa-@k;kHvi)qWN8|f49{PW9OO=w#H@jCcBWYS@KmM_a zf0OCwYOfqZXZz&f`GFeQGdNn_RIz8#uHKY<^3c09|o9idntC`QIbMY*_(n|+foXpbRD{i`E@a;hNB{OrWyjuGGaP%gpwZR77g9| ze__w^7c&J%oMK8Hk{>oKaXov-+j_|7wlM zxgFkX!5eh^TWZrI1^b_k#J+EMS7cV;vS)tq^OgBXd0mmToC|W|vmM$=rStPIUxacGW&4^v`gZs> zuIw)B=?c*rPGP-gg3Nm+@x*&n95W3q0gr73YHL? z_`lA&wWTN3(mycav1fs|!?=Rn0fI1Mfhy!-$}N61yM=|KH}*o?1m!v7nmTP89e?Jt zYNdH9s$WM$yEf?_rMR71(lTar?`jo%Eh5Oh*l1IDQhnWx82~pE#%mdP^?2pq-2n8A zfNpwkEayrg|CjRRvGhOuR1F_J^)eT@?jtX2)$+wZ@NnV!h7ArR{j2cJa(wMD6kW8ScOjiF1F8E`3)gYT~z5 zi|mZU&oOcnp7BP}=JcY-nb;aflO|SaR!yCBETY~`+x(mqL9k-@e&LlqExa)ZYejqb zp6^wC*7M1B%}TPD4~mHFmCTrC(D%*pdz=w4f`Xy*ThnN>i1D+CwIspy9)F8bBc8E# zOh5>Q0v-;T+4*DUnVz*=tb#^}PB?MQFUj4M-iCGE?@r`)w(M?;rAIyG|BQRsGukhY z*%d;$U`;2oC(si8>T%sJ5xp?q*SaE^Zm&!zaNEbF&^>o=&X0W)CSP~poK>Nt@;q3l z_aRx>L>N_%w)wZBR#>%2riPkqSYij$5ifdFl^;?-y`gH)u>4o6lk+N_I4e3d_Th`u z?o?pqdI!3%X}OJTu<|>xALA`8{)xd?o%IXo{rdNfKyeLwfYP(+%DYlR#Od8$^$DM! 
z%r=^Cg(ga7V0!;iCaf2o&&uSY(HeRfXZUu$KwtL9B8tY#}HkP!N zs9?l9?zo43(%LV`%Z(xfsqd%iIXJk7BFywQ{1x5yt%z|&F%|15jJ@87>Tf#j3+sj5 z3~4_-j=zy&YQ-sT#SS4;$<&C2%RU3E$ga0#9{E9&K?I7v(zqGSMTL(I*?q-|F=Uz7&4}3ZKVuRy5Dd3Mr?I_` z2{V&KU)yStjE*aOhm%kvfA(5N?y9&MBV5b;lLM5sq}6G8Y{2j8BNK3?ltG3#HP~^ zNn^?eV-3wj*L9q|Bj0q|Q#Ep$ND^x%O^tksj& zu>2(jifcEu#*55udep=0osf;dw*xO!f|r`9?!V_@tz(e<*T^uXoCDTx1y~y4=M;u> zICcNKKhy6kRf9h*od2ftUHmahzjjGG2j3FEdA2KKuDH$}u7QuE?2@jkJWY7BU(~g4 z*W~jjp7GEwkG0x3!MOXH0YIE^Ws&w5_4->+_bY2BBoqVv!w*_&)+a31w7+K8mH49J zvV_n92^UrI6Uo+pu3}3s2c+BYZ1{!}e=T@S=7o{(#yFYu-$ih*+ZVcF;`3)ErF(WK zl9(8{b1#)cP??{LTa@PoV-qc87Huf;87E>0r11S*+ygo^`k6PV{QP}26nh87Q&9ax zGJE6qaPO{?>px?Zh7xzVo7_qQ?5)Xi*oKXnMGrpH!0kg1IEb-^hOSi6BVCi=ksFH9(7cfFTF(6^fKPj@Tj2{D{m)6j%_H1p0Mlf zfl%DYa)BG8B7Jy6#9MW?0|E6gmE*9|g--Ye8fsQQA>@d#1KtS5i3m#h5gZG@2!+qk zxIT%@dw$3nzLdqI%a&<;HcfamePDDVDRlk}7U|gPMNh}94_wH*=w#0;@82@b0Ja|x zPb)j(LETAi@br&8_+7H6ZusU89D4-b(NM=Bf#>gD$;ZNdstbk_Z!B8i4_j~2Lq_?8 zy?-q==bpgaoGbFkl34U0zI3=yR0((Qp66$BkvR!%X2qesz7X6XLeHa&L>Z%C>$-gN zg6IcD@eOL$H7h@M{B>MCLwspXF*QG+*(&( z&an7jfKdz_#`fSGT{V90w&9!q*#U9r!gdy(-$)aKs?PghP3d@zNVKkDIAzPvhha+P z&y?|kc-^hdot~na+-&Y=_Jxc*1B&~sdSN+#UbtWGvPJ4MxAEse0O)zod3rUuRrSUU zZp(ehVqTxf_fdl#(~O<(ax$ej>Lw=PuLN@PkFzPodmjc~B;X}G7`_ck$`MGCV`CO2 zhUtsD+t4114Ky1tsUv37-_JRl`kqK$(5<$r@;6^(MnSB~apYP39QOR@R|KXXE#aP> zRBHj_+Ddl7ZOMb#MKAE7y|K&Pp{fwaDMfXOR==+*3Pc%_{XfNS9QGB2-(Pz0(;`M^ zJ88Bal1}NVna|jttXX```It0r)>^H@Q7aV-n#=LwPn1?t3b#IFwnrYQO`TP5-Zk=~ znUdGc_sM|tc2o2n;a!n)6YYqfJb8up)DdyYa^W{}-+mq9RH3oi?#gS?2s-B*MZrYm z4S5u2uo&=;NYRq&Z&9>#4SqO$rVJ{^L{@wv-x7l@*1m75UFA8f`2e( z9r|o$1V{Y4m7n~D4|yN0e3nL|ooReyoZl<1EsLYfOA>hF@gDGvyy8aRn~b{LRd5hn z^=)QuyQ$!)V+|XM$}ACFI;>VAST%M0BpR0^zvG$a#%3YfjL>YLZ`69uWa`dI8nUiEd@s~8mHa6XB^sL(5X518t>>a5Wg z64_p57Oj9)IUZNrEMfRe(1Xha(4KHK%I+PsolSO&;j%J+9fWr8nhHsQK`@Wm^N?Ht%7?Yl)A^ zMeR$Aw`{ih^L+FoRMr0~INyEl|1QAvK|CS`*IjQJ^Rx1?u)E&=T7+6aLjqUDIBWZ? 
z&ly0dFX>iUtk~-4g?h|5U^Io5OH|t>5$iOEMP*2YI4!joS2 zyJ(39NhYCb?B3fRdzGuL*@F`TUac%24K4X zUBU)x+`G6q^=ZvQ4V-9p_pP>_jHh>qS~@#1*xA|l{{DSiS?S`Uz2125-n}`XF(X;& zz?`VK7EkZ`88m>|6jOu-s1jH;CKeVUz_@Ga?ZpME+H%VB#fbtf9bHU*{sUpJV?3ZZ z2Y|E!7(~W4Hh4Z)bw2B?zz6>DAqobIyZ|1$1FZoI3oD<5DWIa_k(9JFy`Uf^(5mcR zU4ubyqokzNvU*Td@3|x*pQF~`ae(pT$B)p62uWL8c4``$n?ytiGP0zNFCVp#TC3Yk z%6IPHM+e#n^5ejP!^6kV%*)fB9w~|=Hv_6vcVAz7ZxY8o=o&aXhdxW=9&U}_2A=6I z&ty4f9CA3NRvK1>jdTF`mT4=6eQSp@}n{%QqMHa4v6h7F;>VM8j3K$ye(_Z~;A zoc-S=d8DjNOG|6-=qL!374E{G7zddqks#YaNn$tn4jhdkkh>fLU8mDJ63X@l#h=T z=;#5k1;Ka}sQyn>r!^TgmEfL0K&^3FCGb4iiBkR`sOgsQN#o&73W_k0B(@9=5`t{N zIV6{~r>EyT&?`WFu|IB+VR?~tNwf=6z3bWvHJC#eP)Lx(W7xbn+k^N35FNo_kep_) zJ0Lk_i3ebeXl2O8(3$`R9QgPx@8~5rE)Txfh4(Ih28!Cy$cVIt1~CYw(m#ACs#JzG zL@FqdN)He?B%-Rbb*vuzggpbQ_u}sY^;e)_gNB4++Ll8>D>E1<@H54M0y|V@CM)>l zA%N>A_ioJtw_{?b7$*tn?!d$O#`Z3QmQS^^`z{?R5S#-81Ml$il7Qaf zL77ci>pGY{kh*1oT(h>M8FqMdgd7O4$mm5yn-mgL;^PBBVmdO$xSf3tq#^JUD!BA} ze;R+0O>Le^MleWCnw#Gwu*GSx!LwrLt+AVV${!?bZEeezA7L;-7Y{@PNTNP)tP-^3 z-w%RF0+?Tg4GbhRdtcJ^lS1z_a`b3ORBL z3eEGsa!2b!nP@j! 
zupGp!2HY`a(71Gg`>?n?Mur~XuUGQlW~`J1Zyc&|SX2;W_rE-d1|?gg>rIxf0+Mh?3zXHFchK{B_vV056m^e{a zC=8q<;P=Ut9BdE)XG`GmV-l|cmRsq-w7L^3XQlB+&?N|Bu9~_!%l35>ggi$_?rl9a z#^d8-P^tiZ>1%~~gXa+rH9^REZ;{#7*4DfD_-qIL*tvKhMrrzHnx*h_=I$#zXiVdUY%p%;<6kF z10lQ=gd2USyhTdY84z{-KvuYLvD^G+wlBiUu-P~2zG9*=?A7uHy(D9tI4h~P+=s#m zP_le38wCEh7=)j1-xI%i9J~abEK=Y>!Z&!5vwi*R`1I3axg%1V#iX461Qu~>YKqf- zt_8<({ba}W2bkSkble_q_QL$-q-=I-?d3Yh_X;_Xp5T9E!2RCVrkOE&a_tOB4p_k` zz^ut#2Y(L%$|^Dk>{d>FFE1}IHSGv@fiX%vD{s?)m^S^-m>4E!x`A`ktI`80pd zc=H=ENF2i`^#I~=cL2nE?$WU)Ok_abI3u+FxIuISyV-Z!m@Bj*gCb7^@B_9*}Hm5Y-UtX|X3sf)?3~#>K^D_Tq{zM=cBjHp3=}6M9cljh=Fu zwqxDE#7wfL?Rb6g{@6$ULUP#YS&&5LKADkx*T>vClIrG})&EwtSf>m%f|Tt$h++^3 zAS9k{eH$$GKKskiI1z90PY)yxAOe%lEr?#+HE>z3Dg14rTx~Zi(-lnvK@u0F!bqkt z@6LQ%#69lF|H}mkL8eqN=bPxEP!P#z&dkg#WT^)VXv^dz6$l1PD=MVq0w%9F7s zA0d5x{kesOw;(>~FC6&RFib?E9tJKI~G5rTO zK(Zr2*zA`?m&?I{Wg)i*{2}QynEGJhT?T8PCBp>V2;SZ)4rC0js7= zN=r-Y1*|v*Vhp5;7b%a1l+9Kx(l zhNzE5uN_D@BS6q;Mb+ee_C6>msEmxmka&iTgyH^u_*YOF2wzJ!hChj;krETPgPrG{ zUx0io1)i`?ilQ%HU0sb|o*gJouK=9*Psw3vX^CVi;~_5K_yC0|I`o6f!(o-@kU#Vq zJ$XUd+&4R$A>wl$|3SnnasSLaF)>kVp-fwC8fO>WHHPr22KeVl((@MN`2^!;GrUI4q)NFc=4iTW`?$)pulR_74kaFUBLU46JZSGYNRL+81k66Z{L3L z@}()@H~&Y9~vS8Ms5gb z;}EYZ#XuB@3X&!~Y5^kP*qU`l`bS0KBPCh}1_nqyLA%-4>^hR%?S1hs^AQ{@tS4Zg zaC}1$zuk;6D0ub`52XMs`n?|%L9ZD9m#VS6ul*;cNMX|ibXqrolL7UY7S+zyRwQtg z-90@e?hxe%gIW!(+pYxj37cjJNYCL&1z3wQ$W&0N=O1FOk(*?fPxRZ+%@n)hzm}(*qW-n4d+kX z_)n4xKeR(Nu?HghO!+uQq^1*8+Nxi+wbeoPr@U#QTs(B~JhjS0Umu7vuQKb5203aC>kqgciXlp)w{Sv%04axI zB+}s6ld>Re6$&EjF(|f>M}yG4x%7vF7+T@T4k$=Ur&dq<-c&NGE79St4mgnT) z&ul%caTXlFgvWeCu#Gde%x5t*TC)a@7>j}azPNXf&4?!Qxvq$TxOkk zqSvQpzlMjcx32g*-(MI>&VKE4)#YGdK#h`V1F3?%i3vSi38Lk4&m$}3DH^&ZFCo6H zISaxG32jb;A|dtlUO}7AZf;C>?tEIZfD{Y7`L{@+?bVf{???9qWc3fq8O5N@-oe4H zj*cKOs4T6bamW?0Ag?MXxXnP;C=VdqeLjdRS-`>chqCACo(X)J9Gv(9724tMY;*P$ z?gKVVfQtRPKgTJd94)J^=J1FD$^Q>H9EQcE zj(_*}=iw+4Bp$ngfpLqHQUN)@l@tqb&lf-5-|zbwIPi z?ZX3iIQRsckkA57$RJ@C1N{aZjiLftI^>BIptb?|WGuwWtVJj*dTO0k;RF|jA7+qv 
zOhKcGEHR-(=RzLSFqmQFI7UZ7@dE_31A2CinmNrSRo1`rZz9k4I6quRuJg(4n~a-x zWi;y2Awq(2njdeCLvix{kI|dP7w2}GvoU}FXZ}TX``x~lhDb5=j~`*+@1SJD#>ZcV zqPrM$1o^cv`@YJn;3}g6ylR?Z8ZqDW9K|GVr%PwWx;DEH{Xz_ibqA&=@d&DQypLzo`J>45Hg=Zr|2jENe_#{6vEwWJL{yZrkKbNE= zJxW;th=2NjT!NHiU+0R{E_i%wy0Bc2H*5b+GHy1i9i z4L^MZNHEZHW)FgI5)%`yxGlJ<;xDpW)BaU>myMZ#x2CP->NTuBe zV4I+pBEnP4VmKNoksb>enfSA0E4L~W0KS}^oyQz#xeEkg;oW(HmYT?rtKr*y)-~+_YmWTESU<(2YidXm6J@>d4Gc$mn zLBh`f^^}g$LIeVlkf2~<8URoTyI4yAj6}#88T&zZz%T3{2>eQbZ0-vq+h*zQjXYN0xXnKy+_P7 zBt(r4aR-by3$d zE)LNLNf`Je{r>e;4uq&)18YOR{_Wr(AkTAN`Naa*40$Ifr?*jQ-$fudu5Wo?pDM^U zVZoqvz(7RqgSP^Eh##(h;9&HCfhx7V(+090Xad>*Fz@c|1wI8YZF$9B0&^@vK+5C- z1Od>{z`IBEs$d%eIQkn5X??DdUQkdFWYZ$*xO|ynonCMgfJD*!{CFoMEG*^6WU}x@ z8t*Pp>QYcqDF9lHR``ul=%X1>!z7kWbzIHlSr*w z3FGD9dRagVia`e8@rHro86dG&ON~owsbC=hFd77;o(<^vvR}j$pqMYgXs%leqZ+3m z(Az&fnC&FY}e;+X%1zPfbF=R5C9awEg%n2pq6}l9Gf#B zvrIMmih~Ugh&%xL!d@NYnUlm=b}jM|E}w6t)Dqr`1Tl@KTt zC^$f<4`_XM9-au0Yrqzc27>~73GD!@`pc2`8Z>S4ii)nl{0&fdxidqLMtSl5r5C$) znAP(Hi~->%2SXRtJws2pp6=TK-wy~NU;sfreit@M4(NiL`1l}DzJgLtoz81Qxx2Rq z0ecES1y&Z87A(ETw1laAb~&5IlGXA*^a4O+3!+ES+L{~mz_rL%v-&P|#+H_xprzto zRT+H=lGP&W{LbA@tLNndLfgK$X7@?Z4*{nNRw#5H^EF8!JKDa+^gNOw?zxfOMMY)X z<7>_WxLt&#rWzJQeZ)~F=s%z@!s2kKX|kRdz668EM>c-m(gukSDYh<&f(sltuo%@` zO`vwX{)n0m#nj8Hwh=}k2}Z&w+Sdu?S~Y9v?m*}8&7XYjjrvj`si?_0zzk0V_1G^IH=R$?*3p>sIO(!MgV5*O{KJIWm+EV`@!Vb|lNOkV1 z-AZWt@({l&Hq3tMInw6(Glu&LB#+sYKj z;C1C~ws!N6Zd=MrrMdroh+gxk4O8N@0O9Tn(jntEJ(aki)UG_Ud~Hq#gX?5#4n)L7 z-Og<4Sq(R3jbGDHs2#rx568feL}ypu`}`U`ZW`gEh!j85|EoZ(Il+ZUE9$SjLpo1z z)R3p@9Fvaj6X`i_!+a*vs82I3PHUCiNp^9&e~NIa7-by>FF*N&)uzw4M_mndoZslL zduGXhKB`hHuIzYT;>ki6T~xl^`w)kD7a(zb__3@rUeHY}c03lI6ajAP&qhkA<5WaG zsn1g}`hdSA6Y7SN!Yuq~Zqr&T8+R_HxspDY4Jg@*_1ogTo-0%d9Npim%AB>E?#0fK zsC{_2En=(t6}V^HOP)OgFAYy%BX`km|pek&9I32&@z# zf0xe=8KbIBgepgCI?SjYcV91_v_fsq>fcK9PXS60Ng<~)mqE>?aj+12$?SP`-};}@ zo^c(2TsexaGK8=yQ6R9Qs!+HM7LHl( zAi%}%FW>v*N+easqhN66S5T33Z;=xQcONgt#hv~e6IJ@VDHRvDTf~?~p1VqdVl6I| zx#Fk1H#d$Z%s=^o}#*a@44uhCrLOrfq(PG$d7-2IIEHdIvL 
zCNtkpgwkEqyO=6{-&G6+Ll@C*S1=HtAxY*!rS@#iXQ}>m-UXqWm4L)*XNs(mk}=g4 zpKCDl1hVt@;aMg88|jf7RS_E%v>aT_n3m55E-T)Si+i!Ifgw^lVKay6Tg9^$*E4CP z*qq*dU)GtcRG2RegCC&OnQ2`s+)lD%f>94NWk)Lzti|ap-XC2s4hyeo%SdJ<^du!T z2w^S-gUTrA?kgD)osZ!AG|DN&TfwO#jSXYq?y|IsX3i_^IQG1-FYPaBJsEB?#qTO+ zEn|&!A}btxp4A3^*QcSayt4c}ZV6^K>WoOEBUV(WZP>ItVG{>4C_v{Z+4-_582IXg zi>WKQ*ji(vwd%r^!G#!gFSQ-CYATi1LMBrc3^uM8RgoPcDi0z=8zrU!g4vi%2}&w( zDP$d^nNz8uVyDRUmjwpDGv2?em>sCw(|JAKj3|1uS6%(S2@cdY=}vT*=rWHtSxiXm z=&xG$m&A?t8%)^`b#f74 z2vA$C#V&kP_4?<8FdutXR2qHcxb)M$x2Za93cwB;O9UOEicXmaU;yrv%viIA1-b6(9Fu(1% z>Ao%rA30PO{2Dn~_iXKRLPRGNp{vb z2;s2!M}ay0AM%yLFDE6O=OVsDP(kN?G?-uYKUdh!q z9iGwWGB;D>Lk}ADa}J1)?`SuD3hBQ^PgoejEC0B5nOzl`dDl1d0|}HD%oM*ktgS@x zif)8m>BOl}Wuq#P>5Gw7ZF(@hsv1FMBhI1#mbc8aMbBN#$8>&YMxwRs==Y%Kb2Nb! zbB)zuzk!$RD6ue$thUAsYi{;;F+<4TnhOc-1+Q>D8^0;c%IHMTt@qRJUYZcwqF&7- zo}-f0%0TIH_J8&joY5_M{RQ?L_fa7(mo{)NI~Cz4-NsL!NdE2flus5Nq6(Fngm);T zhIC*NWIU7csx@Qqq2i96?qS-8b}S#*E5uYy_^Nqr`z)oSOollcN$ z9j_h-s*19h#k;6$mDNe92%gH}>df9Q>bB&nEypQ_+aj>+s(G4n&FFZH?V~O9Inx)) zOg;rUJ4r(tYiXksPvn<6-#R`U`tw7?>XL=b6H|WK8J1*nmI8HjXhpD-P2p~f7fxwv z$~Trt`HGt+aP6mcK}e=~uhPop?U4Q-9wM(28Ohc*bvV6fEw+d=Jo)4+(}n-bZZH$_K>Ig0TCxN{xx63 z_#$_-ig!KD-tp4%y=k7@CayU)yur7}UnQ#qiDkD5D_G^IOPPv>K^ntO1HU)C@?+rQqH^dbPCg1Z zhe!qxsR?n0(;Z&!&UrSeFDAvy+B)DePwgU`l;Xtl!q=l9Mv&xoRMOg{#PqKKSv%P) z>S$xO=^kBH`3=r)*i3Ewe989oRr{y(B)MPV)VE@6;cQSVyN_dL)<1}AOL>wfsfZ>g zzpw7dDq>=7iQJbSzWm1mM2V!?`)t#UoXWC+7T zq(P;5{7QTFQaslAKpSjxV&qS470B7sdzUt3+EunygHJ^^t3dyH{!LyWKH%Tj= zhq3!K&Lwje<4!g0E$6!uF1NxE?vE3t(HAxwPXBvj>l<;z?$g~V8wqLZkgZuD=IkgFVlWuN2oVn#%Hy*N}4#9efV%QVD^fy zE-T9xy_8ON8Hck=EO#3t6Q$NwaPU{_SsIy5r^fd`-bkl)Gw=_!2+e=Q4k1G3FxK|8 z+b0sG-;2h4JD*+NLqlWb}tcUUCctXSeO*mRf$fkjO90Xtz7E4C6T~o-@B+Xm$#(rzz*}EAYhxA*gza^P> zn#Jew7jVgfJ80buzRJE}q~Z_D3tNOB7bT{e#FYOy?fpRUHqq|l=e%#4UwF*UvzNMR z5G~kMO8xz|YL4CjsUr&mPumDnN#+c%!yN?K7Bu@R=jvCJh~t)G;U{XpbDZLP#Jh~Z z15==J%zC9na;k91a{5LDFL6X*e0RW zqm6G{pXQDU&V;PBZ>dWku`~OL<+XYh?xoN!E#j2^eF=qMI5E8mD9OhMv8+r*wW2iR 
zoh&AI!OM6yB+2~vs{W*6Z7v}%!x!<^F5#L^xJ!I{A&7;n^!2REF3Ii!*Wj?v#e~@aj$|$KF-j< z)_;#Q>)qK50)mhFM*+<^qwH#2q>A+cd(6{$r zgFoI7dfVFeZgT(0>Vs9hbxb`8mm*h&&$>-odYZS}uEl!_sj>nrFERr zaMM0-psQax_%(s*>)!|v&V6R#|6!G}RPn zn=Y{k0lSHEPHE{U=eTHGOE;Uh)2@=o)^w4;EDIhpxfF!s?@iBXQYGq zfAa)6i(sqDgaaC?5bD?bnh5X{w>(L*-18Dt171^r7y0#@Xy1oK(X22G=Z=cC)_D-- z3(fs);KXSL`AX`Rv>RA*ekJ#mC3cKZ#(l$%bj?@DmVEV>K$5%OqoK(StxqpzRENq0 zt1kzr`n-_1Y`RpX6Hg|SNo_lvo#x@?$Idk!B^Z1xASO_Wi8|ewj8~TWi|j|Eh@|+g!1;ifU+}LSijYUC z)j1Qhjb1wkG+x&0@}uC8x$T8O3`|W$A+N9o)8c>h;^mhiA1vH*n*jatVeA}Pb9eN6)z@VC#`$WIY0n&eAD1>cNI`;?X zYTwlTKjg%-S8dM7X8N9*3DG9)O`U;nFxzYutaaM)%iS3twbSeyXs6zYNl6V545#df? z{q-RdU&%A#rQ+`sIZ4X7tq-duA|##>H%BUO_-%Sd?z-;>l$Za%6CbVZ-F;rjEBC@N zXz}ach|qWX)s@O+SlplAW?S!LVr&#$Y*uZ&ME1C!%$W&&O86WWm#7}vn<_C7{}i4f z1M|AtMs(gLUF&2+GU3X2;&ka86$*A!ehR28v@YN8aE!~xFh2}=YyZg(-pUWoa=&*I zw<9!xyQdYGD~M4^bRtx=UTOgT%I@{b2F`xp8Jr(!wmx<@dJ7)BI-Z7RE@VE*VB;5w zR+|?8>;ee|=`{I1xWsA7KN>V3q|7x8io-yr**P9vC1sXq#r)QbZcLZvqb_ELK$5HH z14+Q)>$l3(or44!Jy}{dMR;o#rs6KUZNH`6M{}xKxwaK1I?D~AkbFvP7wjJv`C_*2 z|Ni71$Tzx4uSqZuUWDtT8{GHk)So9m_BcFjI2rCEo!Bch$)MF8;2r1%OquQ_1)%b02!XCzhvU%lk4?OJmu*UOf7Zr z^o`3iJmZ2$Jbi6v(I%KGp0q`FFDnSa#-*{vZc^6;fQ*U; zhnKLyeyz z!D&ioL?5W{S}zC1C6e)t3yAm5v&zeHn1ac=Zf`&5j7iu2Tjowb?!3F-tr_ROF@s_` zQV=%Zzok=RIIxGlh$6e(lFRTmzwJ7AiD&9)3ht${k5uuggXnYP>Fs`+DB53E&$@k1 z4>kvb+6a?`zq+}*v&bKStb7ppqVRWX`(DMiqsHHz{b&(zJg6)^p5@((2n;+JlG?0A z6Sj4{5Pr9P5lt^ThqXUF6XmkC8%-w=2;o*hAoFz*2*Ix-tK0kBv$Q+kJ2OhQQ;^k( z;*_1Li%|rb<<^^vy!Io#ZBbE)6a|zevwWPJ4!jDV)J% zcM@vqGrlqjGOp!R0rM~V{f6^%K5&`6gU&r#DO>X3Ho~Q?iH6bR#Yz#_f5D&q*QqDgYk%*en-1 z?yOy!_SeNUoJ*CM)c3TJsfn(6gjk1TQ;_+IevSSouLlw>XT!>oPokPlUXR=VGxQQ< zadO#k14YDtf8c8o1{>o|s$2f&0uYFJPxR`96`ow{$I-4shpl-xF7OaRGqZHxc)B5x z?MMRQM`(C(WAJ_uMe8jW(2-Rdo(VrP`tg1RIrR2an2L&eeDc4KUG8;6Nu6Z|F03qc zRYU#S@?mTM7Y!3C|3lezT}CiEF(#gWn8#neEfO5T?vOn(-wz>99HF)?E3cHx)qLRJ z1S-6wp=9>p>|AsAD9#}fk_wHlD*g7v4K>gM$CW|-8Q?$1` 
zcfXekSqdvE4bVl;Dht(z2jZXPPy|HKr%u!O^ZrZ*u(ffUGkG^*p%-!tigM0EjjeOk+6lIJMo}~E>*2Ce3!C-yfPGQOP;}(o(x7CN>hQM9+cpGp7(F> zoAneVNb^vKHmreJ#VY zF1>~W+MrjGy4-eS3iOh&U<&wzZoY^doWELJ_IWf-$h+jliJ=dzpj|LNF=HuGEdiVF zyS?_UyN>Yl4t7$X-ch0wThniuVwYWcrh%B<`E~=a)ee-WLcvk3abK_@P^z*_ootm`zR7nuS z@#os@C5`n@@A{8NN~G0E=`hdGqH)?XB= zxPGQz^FYJwU+yt-5|Zz2K!{!WpSYrH1d%)dd#If)99EE#a8@|dEC-6mee8gR;!031b76ZA7CTnYh3Vd4rmIcs!;M#Yf?9_$@7|5t8X4IRMppnpnj&Uz zP_P)n0hY*7X|c;H??J64UcZFMXyPoNtHXiK1dZ_AK?w}H$!U@WM<$B=KQ>$^4oayonB8Isc%#2ZVUi%vWaWRr$&*ABw8RWkLBoen*)aq8f)tG?Vlq(y#GKH4erV=P6)~avKI;jK$?rIUNThe}dStvJ%I{zKeAfRDZ{arLV7Db-yhfYefO*jU3aF zYhGwjSF#&Rs4)R5uPE2Xif9&18g4j4WK@iIMp7i#<~_+>we_;HM6Cc(1@x0I{Q~Tg zk3SZcO<6nMU!?jA@5&D9@mF=JVE-!vSes>>>6p{%gotqLTZFd&y_yb-L`>5zK`0D- z+j{EcP);QY6N^Jd%>?~Kf+j*~ln;>x%uYuoTtFF?4afZdI`Ln*=%h}v-*^cAA8z>b a5*qDu712aAr4D}h4G*ROmoJeu`|y8h6YOLF literal 0 HcmV?d00001 diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md new file mode 100644 index 00000000..7292caad --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md @@ -0,0 +1,156 @@ +# Rustworkx Backend Examples + +This directory contains comprehensive examples demonstrating the use of the **rustworkx backend** in GraphWorkflow. Rustworkx provides faster graph operations compared to NetworkX, especially for large graphs and complex operations. + +## Installation + +Before running these examples, ensure rustworkx is installed: + +```bash +pip install rustworkx +``` + +If rustworkx is not installed, GraphWorkflow will automatically fallback to NetworkX backend. + +## Examples Overview + +### 01_basic_usage.py +Basic example showing how to use rustworkx backend with GraphWorkflow. Demonstrates simple linear workflow creation and execution. 
+ +**Key Concepts:** +- Initializing GraphWorkflow with rustworkx backend +- Adding agents and creating edges +- Running a workflow + +### 02_backend_comparison.py +Compares NetworkX and Rustworkx backends side-by-side, showing performance differences and functional equivalence. + +**Key Concepts:** +- Backend comparison +- Performance metrics +- Functional equivalence verification + +### 03_fan_out_fan_in_patterns.py +Demonstrates parallel processing patterns: fan-out (one-to-many) and fan-in (many-to-one) connections. + +**Key Concepts:** +- Fan-out pattern: `add_edges_from_source()` +- Fan-in pattern: `add_edges_to_target()` +- Parallel execution optimization + +### 04_complex_workflow.py +Shows a complex multi-layer workflow with multiple parallel branches and convergence points. + +**Key Concepts:** +- Multi-layer workflows +- Parallel chains: `add_parallel_chain()` +- Complex graph structures + +### 05_performance_benchmark.py +Benchmarks performance differences between NetworkX and Rustworkx for various graph sizes and structures. + +**Key Concepts:** +- Performance benchmarking +- Scalability testing +- Different graph topologies (chain, tree) + +### 06_error_handling.py +Demonstrates error handling and graceful fallback behavior when rustworkx is unavailable. + +**Key Concepts:** +- Error handling +- Automatic fallback to NetworkX +- Backend availability checking + +### 07_large_scale_workflow.py +Demonstrates rustworkx's efficiency with large-scale workflows containing many agents. + +**Key Concepts:** +- Large-scale workflows +- Performance with many nodes/edges +- Complex interconnections + +### 08_parallel_chain_example.py +Detailed example of the parallel chain pattern creating a full mesh connection. + +**Key Concepts:** +- Parallel chain pattern +- Full mesh connections +- Maximum parallelization + +### 09_workflow_validation.py +Shows workflow validation features including cycle detection, isolated nodes, and auto-fixing. 
+ +**Key Concepts:** +- Workflow validation +- Cycle detection +- Auto-fixing capabilities + +### 10_real_world_scenario.py +A realistic market research workflow demonstrating real-world agent coordination scenarios. + +**Key Concepts:** +- Real-world use case +- Complex multi-phase workflow +- Practical application + +## Quick Start + +Run any example: + +```bash +python 01_basic_usage.py +``` + +## Backend Selection + +To use rustworkx backend: + +```python +workflow = GraphWorkflow( + backend="rustworkx", # Use rustworkx + # ... other parameters +) +``` + +To use NetworkX backend (default): + +```python +workflow = GraphWorkflow( + backend="networkx", # Or omit for default + # ... other parameters +) +``` + +## Performance Benefits + +Rustworkx provides performance benefits especially for: +- **Large graphs** (100+ nodes) +- **Complex operations** (topological sorting, cycle detection) +- **Frequent graph modifications** (adding/removing nodes/edges) + +## Key Differences + +While both backends are functionally equivalent, rustworkx: +- Uses integer indices internally (abstracted away) +- Provides faster graph operations +- Better memory efficiency for large graphs +- Maintains full compatibility with GraphWorkflow API + +## Notes + +- Both backends produce identical results +- Rustworkx automatically falls back to NetworkX if not installed +- All GraphWorkflow features work with both backends +- Performance gains become more significant with larger graphs + +## Requirements + +- `swarms` package +- `rustworkx` (optional, for rustworkx backend) +- `networkx` (always available, default backend) + +## Contributing + +Feel free to add more examples demonstrating rustworkx capabilities or specific use cases! 
+ diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py new file mode 100644 index 00000000..65cc4776 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py @@ -0,0 +1,632 @@ +import pytest +from swarms.structs.graph_workflow import ( + GraphWorkflow, +) +from swarms.structs.agent import Agent + +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + + +def create_test_agent(name: str, description: str = None) -> Agent: + """Create a test agent""" + if description is None: + description = f"Test agent for {name} operations" + + return Agent( + agent_name=name, + agent_description=description, + model_name="gpt-4o-mini", + verbose=False, + print_on=False, + max_loops=1, + ) + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxBackend: + """Test suite for rustworkx backend""" + + def test_rustworkx_backend_initialization(self): + """Test that rustworkx backend is properly initialized""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + assert hasattr(workflow.graph_backend, "_node_id_to_index") + assert hasattr(workflow.graph_backend, "_index_to_node_id") + assert hasattr(workflow.graph_backend, "graph") + + def test_rustworkx_node_addition(self): + """Test adding nodes to rustworkx backend""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + agent = create_test_agent("TestAgent", "Test agent") + + workflow.add_node(agent) + + assert "TestAgent" in workflow.nodes + assert "TestAgent" in workflow.graph_backend._node_id_to_index + assert ( + workflow.graph_backend._node_id_to_index["TestAgent"] + in workflow.graph_backend._index_to_node_id 
+ ) + + def test_rustworkx_edge_addition(self): + """Test adding edges to rustworkx backend""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + assert len(workflow.edges) == 1 + assert workflow.edges[0].source == "Agent1" + assert workflow.edges[0].target == "Agent2" + + def test_rustworkx_topological_generations_linear(self): + """Test topological generations with linear chain""" + workflow = GraphWorkflow( + name="Linear-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(5) + ] + + for agent in agents: + workflow.add_node(agent) + + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 5 + assert workflow._sorted_layers[0] == ["Agent0"] + assert workflow._sorted_layers[1] == ["Agent1"] + assert workflow._sorted_layers[2] == ["Agent2"] + assert workflow._sorted_layers[3] == ["Agent3"] + assert workflow._sorted_layers[4] == ["Agent4"] + + def test_rustworkx_topological_generations_fan_out(self): + """Test topological generations with fan-out pattern""" + workflow = GraphWorkflow( + name="FanOut-Test", backend="rustworkx" + ) + coordinator = create_test_agent("Coordinator", "Coordinates") + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + + workflow.add_node(coordinator) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + + workflow.add_edges_from_source( + coordinator, [analyst1, analyst2, analyst3] + ) + + workflow.compile() + + assert len(workflow._sorted_layers) == 2 + assert len(workflow._sorted_layers[0]) == 1 + assert 
"Coordinator" in workflow._sorted_layers[0] + assert len(workflow._sorted_layers[1]) == 3 + assert "Analyst1" in workflow._sorted_layers[1] + assert "Analyst2" in workflow._sorted_layers[1] + assert "Analyst3" in workflow._sorted_layers[1] + + def test_rustworkx_topological_generations_fan_in(self): + """Test topological generations with fan-in pattern""" + workflow = GraphWorkflow( + name="FanIn-Test", backend="rustworkx" + ) + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + synthesizer = create_test_agent("Synthesizer", "Synthesizes") + + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + workflow.add_node(synthesizer) + + workflow.add_edges_to_target( + [analyst1, analyst2, analyst3], synthesizer + ) + + workflow.compile() + + assert len(workflow._sorted_layers) == 2 + assert len(workflow._sorted_layers[0]) == 3 + assert "Analyst1" in workflow._sorted_layers[0] + assert "Analyst2" in workflow._sorted_layers[0] + assert "Analyst3" in workflow._sorted_layers[0] + assert len(workflow._sorted_layers[1]) == 1 + assert "Synthesizer" in workflow._sorted_layers[1] + + def test_rustworkx_topological_generations_complex(self): + """Test topological generations with complex topology""" + workflow = GraphWorkflow( + name="Complex-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(6) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create: Agent0 -> Agent1, Agent2 + # Agent1, Agent2 -> Agent3 + # Agent3 -> Agent4, Agent5 + workflow.add_edge(agents[0], agents[1]) + workflow.add_edge(agents[0], agents[2]) + workflow.add_edge(agents[1], agents[3]) + workflow.add_edge(agents[2], agents[3]) + workflow.add_edge(agents[3], agents[4]) + workflow.add_edge(agents[3], agents[5]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 4 + 
assert "Agent0" in workflow._sorted_layers[0] + assert ( + "Agent1" in workflow._sorted_layers[1] + or "Agent2" in workflow._sorted_layers[1] + ) + assert "Agent3" in workflow._sorted_layers[2] + assert ( + "Agent4" in workflow._sorted_layers[3] + or "Agent5" in workflow._sorted_layers[3] + ) + + def test_rustworkx_predecessors(self): + """Test predecessor retrieval""" + workflow = GraphWorkflow( + name="Predecessors-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + predecessors = list( + workflow.graph_backend.predecessors("Agent2") + ) + assert "Agent1" in predecessors + assert len(predecessors) == 1 + + predecessors = list( + workflow.graph_backend.predecessors("Agent3") + ) + assert "Agent2" in predecessors + assert len(predecessors) == 1 + + predecessors = list( + workflow.graph_backend.predecessors("Agent1") + ) + assert len(predecessors) == 0 + + def test_rustworkx_descendants(self): + """Test descendant retrieval""" + workflow = GraphWorkflow( + name="Descendants-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + descendants = workflow.graph_backend.descendants("Agent1") + assert "Agent2" in descendants + assert "Agent3" in descendants + assert len(descendants) == 2 + + descendants = workflow.graph_backend.descendants("Agent2") + assert "Agent3" in descendants + assert len(descendants) == 1 + + descendants = workflow.graph_backend.descendants("Agent3") + 
assert len(descendants) == 0 + + def test_rustworkx_in_degree(self): + """Test in-degree calculation""" + workflow = GraphWorkflow( + name="InDegree-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent3, agent2) + + assert workflow.graph_backend.in_degree("Agent1") == 0 + assert workflow.graph_backend.in_degree("Agent2") == 2 + assert workflow.graph_backend.in_degree("Agent3") == 0 + + def test_rustworkx_out_degree(self): + """Test out-degree calculation""" + workflow = GraphWorkflow( + name="OutDegree-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent1, agent3) + + assert workflow.graph_backend.out_degree("Agent1") == 2 + assert workflow.graph_backend.out_degree("Agent2") == 0 + assert workflow.graph_backend.out_degree("Agent3") == 0 + + def test_rustworkx_agent_objects_in_edges(self): + """Test using Agent objects directly in edge methods""" + workflow = GraphWorkflow( + name="AgentObjects-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + # Use Agent objects directly + workflow.add_edges_from_source(agent1, [agent2, agent3]) + workflow.add_edges_to_target([agent2, agent3], agent1) + + workflow.compile() + + assert len(workflow.edges) == 4 + assert 
len(workflow._sorted_layers) >= 1 + + def test_rustworkx_parallel_chain(self): + """Test parallel chain pattern""" + workflow = GraphWorkflow( + name="ParallelChain-Test", backend="rustworkx" + ) + sources = [ + create_test_agent(f"Source{i}", f"Source {i}") + for i in range(3) + ] + targets = [ + create_test_agent(f"Target{i}", f"Target {i}") + for i in range(3) + ] + + for agent in sources + targets: + workflow.add_node(agent) + + workflow.add_parallel_chain(sources, targets) + + workflow.compile() + + assert len(workflow.edges) == 9 # 3x3 = 9 edges + assert len(workflow._sorted_layers) == 2 + + def test_rustworkx_large_scale(self): + """Test rustworkx with large workflow""" + workflow = GraphWorkflow( + name="LargeScale-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(20) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create linear chain + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 20 + assert len(workflow.nodes) == 20 + assert len(workflow.edges) == 19 + + def test_rustworkx_reverse(self): + """Test graph reversal""" + workflow = GraphWorkflow( + name="Reverse-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + reversed_backend = workflow.graph_backend.reverse() + + # In reversed graph, Agent2 should have Agent1 as predecessor + preds = list(reversed_backend.predecessors("Agent1")) + assert "Agent2" in preds + + # Agent2 should have no predecessors in reversed graph + preds = list(reversed_backend.predecessors("Agent2")) + assert len(preds) == 0 + + def test_rustworkx_entry_end_points(self): + """Test entry and end point detection""" + workflow = GraphWorkflow( + name="EntryEnd-Test", backend="rustworkx" + 
) + agent1 = create_test_agent("Agent1", "Entry agent") + agent2 = create_test_agent("Agent2", "Middle agent") + agent3 = create_test_agent("Agent3", "End agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + workflow.auto_set_entry_points() + workflow.auto_set_end_points() + + assert "Agent1" in workflow.entry_points + assert "Agent3" in workflow.end_points + assert workflow.graph_backend.in_degree("Agent1") == 0 + assert workflow.graph_backend.out_degree("Agent3") == 0 + + def test_rustworkx_isolated_nodes(self): + """Test handling of isolated nodes""" + workflow = GraphWorkflow( + name="Isolated-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "Connected agent") + agent2 = create_test_agent("Agent2", "Isolated agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent1) # Self-loop + + workflow.compile() + + assert len(workflow.nodes) == 2 + assert "Agent2" in workflow.nodes + + def test_rustworkx_workflow_execution(self): + """Test full workflow execution with rustworkx""" + workflow = GraphWorkflow( + name="Execution-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + result = workflow.run("Test task") + + assert result is not None + assert "Agent1" in result + assert "Agent2" in result + + def test_rustworkx_compilation_caching(self): + """Test that compilation is cached correctly""" + workflow = GraphWorkflow( + name="Cache-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + # First compilation + workflow.compile() + 
layers1 = workflow._sorted_layers.copy() + compiled1 = workflow._compiled + + # Second compilation should use cache + workflow.compile() + layers2 = workflow._sorted_layers.copy() + compiled2 = workflow._compiled + + assert compiled1 == compiled2 == True + assert layers1 == layers2 + + def test_rustworkx_node_metadata(self): + """Test node metadata handling""" + workflow = GraphWorkflow( + name="Metadata-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Test agent") + + workflow.add_node( + agent, metadata={"priority": "high", "timeout": 60} + ) + + node_index = workflow.graph_backend._node_id_to_index["Agent"] + node_data = workflow.graph_backend.graph[node_index] + + assert isinstance(node_data, dict) + assert node_data.get("node_id") == "Agent" + assert node_data.get("priority") == "high" + assert node_data.get("timeout") == 60 + + def test_rustworkx_edge_metadata(self): + """Test edge metadata handling""" + workflow = GraphWorkflow( + name="EdgeMetadata-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2, weight=5, label="test") + + assert len(workflow.edges) == 1 + assert workflow.edges[0].metadata.get("weight") == 5 + assert workflow.edges[0].metadata.get("label") == "test" + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxPerformance: + """Performance tests for rustworkx backend""" + + def test_rustworkx_large_graph_compilation(self): + """Test compilation performance with large graph""" + workflow = GraphWorkflow( + name="LargeGraph-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(50) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create a complex topology + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i 
+ 1]) + + import time + + start = time.time() + workflow.compile() + compile_time = time.time() - start + + assert compile_time < 1.0 # Should compile quickly + assert len(workflow._sorted_layers) == 50 + + def test_rustworkx_many_predecessors(self): + """Test performance with many predecessors""" + workflow = GraphWorkflow( + name="ManyPreds-Test", backend="rustworkx" + ) + target = create_test_agent("Target", "Target agent") + sources = [ + create_test_agent(f"Source{i}", f"Source {i}") + for i in range(100) + ] + + workflow.add_node(target) + for source in sources: + workflow.add_node(source) + + workflow.add_edges_to_target(sources, target) + + workflow.compile() + + predecessors = list( + workflow.graph_backend.predecessors("Target") + ) + assert len(predecessors) == 100 + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxEdgeCases: + """Edge case tests for rustworkx backend""" + + def test_rustworkx_empty_graph(self): + """Test empty graph handling""" + workflow = GraphWorkflow( + name="Empty-Test", backend="rustworkx" + ) + workflow.compile() + + assert len(workflow._sorted_layers) == 0 + assert len(workflow.nodes) == 0 + + def test_rustworkx_single_node(self): + """Test single node graph""" + workflow = GraphWorkflow( + name="Single-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Single agent") + + workflow.add_node(agent) + workflow.compile() + + assert len(workflow._sorted_layers) == 1 + assert workflow._sorted_layers[0] == ["Agent"] + + def test_rustworkx_self_loop(self): + """Test self-loop handling""" + workflow = GraphWorkflow( + name="SelfLoop-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Self-looping agent") + + workflow.add_node(agent) + workflow.add_edge(agent, agent) + + workflow.compile() + + assert len(workflow.edges) == 1 + assert workflow.graph_backend.in_degree("Agent") == 1 + assert workflow.graph_backend.out_degree("Agent") == 1 + + def 
test_rustworkx_duplicate_edge(self): + """Test duplicate edge handling""" + workflow = GraphWorkflow( + name="Duplicate-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + + # Add same edge twice + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent1, agent2) + + # rustworkx should handle duplicate edges + assert ( + len(workflow.edges) == 2 + ) # Both edges are stored in workflow + workflow.compile() # Should not crash + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/examples/multi_agent/swarm_router/swarm_router.py b/examples/multi_agent/swarm_router/swarm_router.py index 1801c25a..b8f73365 100644 --- a/examples/multi_agent/swarm_router/swarm_router.py +++ b/examples/multi_agent/swarm_router/swarm_router.py @@ -26,7 +26,6 @@ router = SwarmRouter( agents=agents, swarm_type="SequentialWorkflow", output_type="dict", - return_entire_history=False, ) output = router.run("How are you doing?") diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index a8f7bea4..e693a90c 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -12,8 +12,10 @@ from typing import Any, Callable, Dict, List, Literal, Optional from uuid import uuid4 from loguru import logger +from mcp.server.auth.settings import AuthSettings from mcp.server.fastmcp import FastMCP from mcp.server.lowlevel.server import LifespanResultT +from mcp.server.transport_security import TransportSecuritySettings from swarms.structs.agent import Agent from swarms.structs.omni_agent_types import AgentType @@ -21,7 +23,6 @@ from swarms.tools.mcp_client_tools import ( get_tools_for_multiple_mcp_servers, ) -from mcp.server.fastmcp import AuthSettings, TransportSecuritySettings class TaskStatus(Enum): """Status of a task in the queue.""" @@ -603,7 +604,13 @@ class AOP: log_level: Literal[ "DEBUG", "INFO", "WARNING", "ERROR", 
"CRITICAL" ] = "INFO", - lifespan: Callable[[FastMCP[LifespanResultT]], AbstractAsyncContextManager[LifespanResultT]] | None = None, + lifespan: ( + Callable[ + [FastMCP[LifespanResultT]], + AbstractAsyncContextManager[LifespanResultT], + ] + | None + ) = None, auth: AuthSettings | None = None, transport_security: TransportSecuritySettings | None = None, *args, diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index 4a2b0c90..d1a23594 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -1,10 +1,10 @@ -import json import asyncio import concurrent.futures +import json import time -from enum import Enum -from typing import Any, Dict, List, Optional import uuid +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional, Set import networkx as nx @@ -16,6 +16,14 @@ except ImportError: GRAPHVIZ_AVAILABLE = False graphviz = None +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + rx = None + from swarms.structs.agent import Agent # noqa: F401 from swarms.structs.conversation import Conversation from swarms.utils.get_cpu_cores import get_cpu_cores @@ -24,6 +32,525 @@ from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="graph_workflow") +class GraphBackend: + """ + Abstract base class for graph backends. + Provides a unified interface for different graph libraries. + """ + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node. + """ + raise NotImplementedError + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge. 
+ """ + raise NotImplementedError + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + raise NotImplementedError + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + raise NotImplementedError + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + raise NotImplementedError + + def reverse(self) -> "GraphBackend": + """ + Return a reversed copy of the graph. + + Returns: + GraphBackend: A new backend instance with reversed edges. + """ + raise NotImplementedError + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + raise NotImplementedError + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + raise NotImplementedError + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. + """ + raise NotImplementedError + + +class NetworkXBackend(GraphBackend): + """ + NetworkX backend implementation. + """ + + def __init__(self): + """ + Initialize the NetworkX backend. + """ + self.graph = nx.DiGraph() + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the NetworkX graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node. 
+ """ + self.graph.add_node(node_id, **attrs) + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the NetworkX graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge. + """ + self.graph.add_edge(source, target, **attrs) + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + return self.graph.in_degree(node_id) + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + return self.graph.out_degree(node_id) + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + return self.graph.predecessors(node_id) + + def reverse(self) -> "NetworkXBackend": + """ + Return a reversed copy of the graph. + + Returns: + NetworkXBackend: A new backend instance with reversed edges. + """ + reversed_backend = NetworkXBackend() + reversed_backend.graph = self.graph.reverse() + return reversed_backend + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + return list(nx.topological_generations(self.graph)) + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + return list(nx.simple_cycles(self.graph)) + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. 
+ """ + return nx.descendants(self.graph, node_id) + + +class RustworkxBackend(GraphBackend): + """ + Rustworkx backend implementation. + Uses integer indices internally but exposes string node IDs. + """ + + def __init__(self): + """ + Initialize the Rustworkx backend. + """ + if not RUSTWORKX_AVAILABLE: + raise ImportError( + "rustworkx is not installed. Install it with: pip install rustworkx" + ) + self.graph = rx.PyDiGraph() + # Mapping from node ID (string) to node index (int) + self._node_id_to_index: Dict[str, int] = {} + # Mapping from node index (int) to node ID (string) + self._index_to_node_id: Dict[int, str] = {} + + def _get_or_create_node_index(self, node_id: str) -> int: + """ + Get the node index for a given node ID, creating it if necessary. + + Args: + node_id (str): The node ID. + + Returns: + int: The node index. + """ + if node_id not in self._node_id_to_index: + node_index = self.graph.add_node(node_id) + self._node_id_to_index[node_id] = node_index + self._index_to_node_id[node_index] = node_id + return self._node_id_to_index[node_id] + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the Rustworkx graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node (stored in node data). + """ + if node_id not in self._node_id_to_index: + # Store node data as a dict with the node_id and attributes + node_data = {"node_id": node_id, **attrs} + node_index = self.graph.add_node(node_data) + self._node_id_to_index[node_id] = node_index + self._index_to_node_id[node_index] = node_id + else: + # Update existing node data + node_index = self._node_id_to_index[node_id] + node_data = self.graph[node_index] + if isinstance(node_data, dict): + node_data.update(attrs) + else: + self.graph[node_index] = {"node_id": node_id, **attrs} + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the Rustworkx graph. 
+ + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge (stored in edge data). + """ + source_idx = self._get_or_create_node_index(source) + target_idx = self._get_or_create_node_index(target) + edge_data = attrs if attrs else None + self.graph.add_edge(source_idx, target_idx, edge_data) + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + if node_id not in self._node_id_to_index: + return 0 + node_index = self._node_id_to_index[node_id] + return self.graph.in_degree(node_index) + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + if node_id not in self._node_id_to_index: + return 0 + node_index = self._node_id_to_index[node_id] + return self.graph.out_degree(node_index) + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + if node_id not in self._node_id_to_index: + return iter([]) + target_index = self._node_id_to_index[node_id] + # Use edge list to find predecessors (more reliable than predecessors() method) + result = [] + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + if target_idx == target_index: + result.append(self._index_to_node_id[source_idx]) + return iter(result) + + def reverse(self) -> "RustworkxBackend": + """ + Return a reversed copy of the graph. + + Returns: + RustworkxBackend: A new backend instance with reversed edges. 
+ """ + reversed_backend = RustworkxBackend() + # Copy the graph structure + reversed_backend.graph = self.graph.copy() + # Reverse the edges + reversed_backend.graph.reverse() + # Copy the mappings + reversed_backend._node_id_to_index = ( + self._node_id_to_index.copy() + ) + reversed_backend._index_to_node_id = ( + self._index_to_node_id.copy() + ) + return reversed_backend + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + try: + # Get all node indices + all_indices = list(self._node_id_to_index.values()) + if not all_indices: + return [] + + # Use layer-by-layer approach similar to NetworkX topological_generations + layers = [] + remaining = set(all_indices) + processed = set() + + while remaining: + # Find all nodes with in-degree 0 considering only edges from processed nodes + # In rustworkx, we need to check if all predecessors are in processed set + layer = [] + # First pass: identify nodes that can be added to this layer + # (without modifying remaining/processed during iteration) + nodes_to_add = [] + for idx in list(remaining): + # Get all predecessors using edge list + pred_indices = [] + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + if target_idx == idx: + pred_indices.append(source_idx) + # Check if all predecessors have been processed (or node has no predecessors) + # A node can be added to the layer if: + # 1. It has no predecessors (entry node), OR + # 2. 
All its predecessors have already been processed (from previous layers) + if not pred_indices: + # No predecessors - this is an entry node + nodes_to_add.append(idx) + elif all( + pred_idx in processed + for pred_idx in pred_indices + ): + # All predecessors have been processed in previous layers + nodes_to_add.append(idx) + + # Second pass: add identified nodes to the layer and update sets + for idx in nodes_to_add: + layer.append(self._index_to_node_id[idx]) + remaining.remove(idx) + processed.add(idx) + + if not layer: + # Cycle detected or error, break + break + + layers.append(layer) + + # If there are remaining nodes, they form a cycle - add them as a final layer + if remaining: + cycle_layer = [ + self._index_to_node_id[idx] for idx in remaining + ] + layers.append(cycle_layer) + + return ( + layers + if layers + else [ + [ + self._index_to_node_id[idx] + for idx in all_indices + ] + ] + ) + except Exception as e: + logger.warning( + f"Error in rustworkx topological_generations: {e}, falling back to simple approach" + ) + # Fallback: return all nodes in one layer + return [ + [node_id for node_id in self._node_id_to_index.keys()] + ] + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. 
+ """ + try: + # Convert to NetworkX temporarily for cycle detection + # This is a limitation of rustworkx - it doesn't have simple_cycles + # We'll use a workaround by converting temporarily + import networkx as nx + + nx_graph = nx.DiGraph() + for node_id in self._node_id_to_index.keys(): + nx_graph.add_node(node_id) + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + source_id = self._index_to_node_id[source_idx] + target_id = self._index_to_node_id[target_idx] + nx_graph.add_edge(source_id, target_id) + + cycles = list(nx.simple_cycles(nx_graph)) + return cycles + except Exception as e: + logger.warning( + f"Error in rustworkx simple_cycles: {e}, returning empty list" + ) + return [] + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. + """ + if node_id not in self._node_id_to_index: + return set() + node_index = self._node_id_to_index[node_id] + # Use BFS to find all descendants + descendants = set() + queue = [node_index] + visited = {node_index} + + while queue: + current_idx = queue.pop(0) + succ_data = self.graph.successors(current_idx) + for succ in succ_data: + # Handle both dict (node data) and int (index) returns + if isinstance(succ, dict): + succ_node_id = succ.get("node_id") + if ( + succ_node_id + and succ_node_id in self._node_id_to_index + ): + succ_idx = self._node_id_to_index[ + succ_node_id + ] + else: + continue + elif isinstance(succ, int): + succ_idx = succ + else: + continue + + if succ_idx not in visited: + visited.add(succ_idx) + descendants.add(self._index_to_node_id[succ_idx]) + queue.append(succ_idx) + + return descendants + + class NodeType(str, Enum): AGENT: Agent = "agent" @@ -129,17 +656,37 @@ class Edge: Returns: Edge: A new Edge instance. 
""" - src = ( - source_node.id - if isinstance(source_node, Node) - else source_node - ) - tgt = ( - target_node.id - if isinstance(target_node, Node) - else target_node - ) - return cls(source=src, target=tgt, **kwargs) + # Handle source node: extract ID from Node, Agent, or use string directly + if isinstance(source_node, Node): + src = source_node.id + elif hasattr(source_node, "agent_name"): + # Agent object - extract agent_name + src = getattr(source_node, "agent_name", None) + if src is None: + raise ValueError( + "Source agent does not have an agent_name attribute" + ) + else: + # Assume it's already a string ID + src = source_node + + # Handle target node: extract ID from Node, Agent, or use string directly + if isinstance(target_node, Node): + tgt = target_node.id + elif hasattr(target_node, "agent_name"): + # Agent object - extract agent_name + tgt = getattr(target_node, "agent_name", None) + if tgt is None: + raise ValueError( + "Target agent does not have an agent_name attribute" + ) + else: + # Assume it's already a string ID + tgt = target_node + + # Put all kwargs into metadata dict + metadata = kwargs if kwargs else None + return cls(source=src, target=tgt, metadata=metadata) class GraphWorkflow: @@ -151,7 +698,7 @@ class GraphWorkflow: edges (List[Edge]): A list of edges in the graph, where each edge is represented by an Edge object. entry_points (List[str]): A list of node IDs that serve as entry points to the graph. end_points (List[str]): A list of node IDs that serve as end points of the graph. - graph (nx.DiGraph): A directed graph object from the NetworkX library representing the workflow graph. + graph_backend (GraphBackend): A graph backend object (NetworkX or Rustworkx) representing the workflow graph. task (str): The task to be executed by the workflow. _compiled (bool): Whether the graph has been compiled for optimization. _sorted_layers (List[List[str]]): Pre-computed topological layers for faster execution. 
@@ -174,6 +721,7 @@ class GraphWorkflow: task: Optional[str] = None, auto_compile: bool = True, verbose: bool = False, + backend: str = "networkx", ): self.id = id self.verbose = verbose @@ -181,14 +729,30 @@ class GraphWorkflow: if self.verbose: logger.info("Initializing GraphWorkflow") logger.debug( - f"GraphWorkflow parameters: nodes={len(nodes) if nodes else 0}, edges={len(edges) if edges else 0}, max_loops={max_loops}, auto_compile={auto_compile}" + f"GraphWorkflow parameters: nodes={len(nodes) if nodes else 0}, edges={len(edges) if edges else 0}, max_loops={max_loops}, auto_compile={auto_compile}, backend={backend}" ) self.nodes = nodes or {} self.edges = edges or [] self.entry_points = entry_points or [] self.end_points = end_points or [] - self.graph = nx.DiGraph() + + # Initialize graph backend + if backend.lower() == "rustworkx": + if not RUSTWORKX_AVAILABLE: + logger.warning( + "rustworkx is not available, falling back to networkx. Install with: pip install rustworkx" + ) + self.graph_backend = NetworkXBackend() + else: + self.graph_backend = RustworkxBackend() + if self.verbose: + logger.info("Using rustworkx backend") + else: + self.graph_backend = NetworkXBackend() + if self.verbose: + logger.info("Using networkx backend") + self.max_loops = max_loops self.task = task self.name = name @@ -208,15 +772,20 @@ class GraphWorkflow: self.conversation = Conversation() - # Rebuild the NetworkX graph from nodes and edges if provided + # Rebuild the graph from nodes and edges if provided if self.nodes: + backend_name = ( + "rustworkx" + if isinstance(self.graph_backend, RustworkxBackend) + else "networkx" + ) if self.verbose: logger.info( - f"Adding {len(self.nodes)} nodes to NetworkX graph" + f"Adding {len(self.nodes)} nodes to {backend_name} graph" ) for node_id, node in self.nodes.items(): - self.graph.add_node( + self.graph_backend.add_node( node_id, type=node.type, agent=node.agent, @@ -228,9 +797,14 @@ class GraphWorkflow: ) if self.edges: + backend_name 
= ( + "rustworkx" + if isinstance(self.graph_backend, RustworkxBackend) + else "networkx" + ) if self.verbose: logger.info( - f"Adding {len(self.edges)} edges to NetworkX graph" + f"Adding {len(self.edges)} edges to {backend_name} graph" ) valid_edges = 0 @@ -239,7 +813,7 @@ class GraphWorkflow: edge.source in self.nodes and edge.target in self.nodes ): - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}), @@ -328,8 +902,8 @@ class GraphWorkflow: if self.verbose: logger.debug("Computing topological layers") - sorted_layers = list( - nx.topological_generations(self.graph) + sorted_layers = ( + self.graph_backend.topological_generations() ) self._sorted_layers = sorted_layers @@ -380,7 +954,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.nodes[node.id] = node - self.graph.add_node( + self.graph_backend.add_node( node.id, type=node.type, agent=node.agent, @@ -434,7 +1008,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) self._invalidate_compilation() @@ -492,7 +1066,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) created_edges.append(edge) @@ -560,7 +1134,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) created_edges.append(edge) @@ -629,7 +1203,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}), @@ -860,7 +1434,9 @@ class GraphWorkflow: try: self.entry_points = [ - n for n in self.nodes if self.graph.in_degree(n) == 0 + n + for n in self.nodes + if self.graph_backend.in_degree(n) 
== 0 ] if self.verbose: @@ -888,7 +1464,9 @@ class GraphWorkflow: try: self.end_points = [ - n for n in self.nodes if self.graph.out_degree(n) == 0 + n + for n in self.nodes + if self.graph_backend.out_degree(n) == 0 ] if self.verbose: @@ -921,7 +1499,7 @@ class GraphWorkflow: if node_id not in self._predecessors_cache: self._predecessors_cache[node_id] = tuple( - self.graph.predecessors(node_id) + self.graph_backend.predecessors(node_id) ) return self._predecessors_cache[node_id] @@ -2228,8 +2806,8 @@ class GraphWorkflow: isolated = [ n for n in self.nodes - if self.graph.in_degree(n) == 0 - and self.graph.out_degree(n) == 0 + if self.graph_backend.in_degree(n) == 0 + and self.graph_backend.out_degree(n) == 0 ] if isolated: result["warnings"].append( @@ -2238,7 +2816,7 @@ class GraphWorkflow: # Check for cyclic dependencies try: - cycles = list(nx.simple_cycles(self.graph)) + cycles = self.graph_backend.simple_cycles() if cycles: result["warnings"].append( f"Found {len(cycles)} cycles in workflow" @@ -2268,7 +2846,7 @@ class GraphWorkflow: reachable = set() for entry in self.entry_points: reachable.update( - nx.descendants(self.graph, entry) + self.graph_backend.descendants(entry) ) reachable.add(entry) @@ -2289,11 +2867,11 @@ class GraphWorkflow: # Check for dead-end nodes (cannot reach any exit point) if self.end_points: - reverse_graph = self.graph.reverse() + reverse_graph = self.graph_backend.reverse() reachable_to_exit = set() for exit_point in self.end_points: reachable_to_exit.update( - nx.descendants(reverse_graph, exit_point) + reverse_graph.descendants(exit_point) ) reachable_to_exit.add(exit_point) diff --git a/tests/structs/test_custom_agent.py b/tests/structs/test_custom_agent.py index 3cdeda25..63969b97 100644 --- a/tests/structs/test_custom_agent.py +++ b/tests/structs/test_custom_agent.py @@ -6,6 +6,7 @@ from swarms.structs.custom_agent import CustomAgent, AgentResponse try: import pytest_asyncio + ASYNC_AVAILABLE = True except ImportError: 
ASYNC_AVAILABLE = False @@ -40,7 +41,10 @@ def test_custom_agent_initialization(): timeout=30.0, verify_ssl=True, ) - assert custom_agent_instance.base_url == "https://api.example.com" + assert ( + custom_agent_instance.base_url + == "https://api.example.com" + ) assert custom_agent_instance.endpoint == "v1/endpoint" assert custom_agent_instance.timeout == 30.0 assert custom_agent_instance.verify_ssl is True @@ -51,7 +55,9 @@ def test_custom_agent_initialization(): raise -def test_custom_agent_initialization_with_default_headers(sample_custom_agent): +def test_custom_agent_initialization_with_default_headers( + sample_custom_agent, +): try: custom_agent_no_headers = CustomAgent( name="TestAgent", @@ -59,7 +65,9 @@ def test_custom_agent_initialization_with_default_headers(sample_custom_agent): base_url="https://api.test.com", endpoint="test", ) - assert "Content-Type" in custom_agent_no_headers.default_headers + assert ( + "Content-Type" in custom_agent_no_headers.default_headers + ) assert ( custom_agent_no_headers.default_headers["Content-Type"] == "application/json" @@ -78,7 +86,10 @@ def test_custom_agent_url_normalization(): base_url="https://api.test.com/", endpoint="/v1/test", ) - assert custom_agent_with_slashes.base_url == "https://api.test.com" + assert ( + custom_agent_with_slashes.base_url + == "https://api.test.com" + ) assert custom_agent_with_slashes.endpoint == "v1/test" logger.debug("URL normalization works correctly") except Exception as e: @@ -90,14 +101,22 @@ def test_prepare_headers(sample_custom_agent): try: prepared_headers = sample_custom_agent._prepare_headers() assert "Authorization" in prepared_headers - assert prepared_headers["Authorization"] == "Bearer test-token" + assert ( + prepared_headers["Authorization"] == "Bearer test-token" + ) additional_headers = {"X-Custom-Header": "custom-value"} prepared_headers_with_additional = ( sample_custom_agent._prepare_headers(additional_headers) ) - assert 
prepared_headers_with_additional["X-Custom-Header"] == "custom-value" - assert prepared_headers_with_additional["Authorization"] == "Bearer test-token" + assert ( + prepared_headers_with_additional["X-Custom-Header"] + == "custom-value" + ) + assert ( + prepared_headers_with_additional["Authorization"] + == "Bearer test-token" + ) logger.debug("Header preparation works correctly") except Exception as e: logger.error(f"Failed to test prepare_headers: {e}") @@ -107,7 +126,9 @@ def test_prepare_headers(sample_custom_agent): def test_prepare_payload_dict(sample_custom_agent): try: payload_dict = {"key": "value", "number": 123} - prepared_payload = sample_custom_agent._prepare_payload(payload_dict) + prepared_payload = sample_custom_agent._prepare_payload( + payload_dict + ) assert isinstance(prepared_payload, str) parsed = json.loads(prepared_payload) assert parsed["key"] == "value" @@ -121,22 +142,30 @@ def test_prepare_payload_dict(sample_custom_agent): def test_prepare_payload_string(sample_custom_agent): try: payload_string = '{"test": "value"}' - prepared_payload = sample_custom_agent._prepare_payload(payload_string) + prepared_payload = sample_custom_agent._prepare_payload( + payload_string + ) assert prepared_payload == payload_string logger.debug("String payload prepared correctly") except Exception as e: - logger.error(f"Failed to test prepare_payload with string: {e}") + logger.error( + f"Failed to test prepare_payload with string: {e}" + ) raise def test_prepare_payload_bytes(sample_custom_agent): try: payload_bytes = b'{"test": "value"}' - prepared_payload = sample_custom_agent._prepare_payload(payload_bytes) + prepared_payload = sample_custom_agent._prepare_payload( + payload_bytes + ) assert prepared_payload == payload_bytes logger.debug("Bytes payload prepared correctly") except Exception as e: - logger.error(f"Failed to test prepare_payload with bytes: {e}") + logger.error( + f"Failed to test prepare_payload with bytes: {e}" + ) raise @@ -148,7 +177,9 
@@ def test_parse_response_success(sample_custom_agent): mock_response.headers = {"content-type": "application/json"} mock_response.json.return_value = {"message": "success"} - parsed_response = sample_custom_agent._parse_response(mock_response) + parsed_response = sample_custom_agent._parse_response( + mock_response + ) assert isinstance(parsed_response, AgentResponse) assert parsed_response.status_code == 200 assert parsed_response.success is True @@ -167,7 +198,9 @@ def test_parse_response_error(sample_custom_agent): mock_response.text = "Not Found" mock_response.headers = {"content-type": "text/plain"} - parsed_response = sample_custom_agent._parse_response(mock_response) + parsed_response = sample_custom_agent._parse_response( + mock_response + ) assert isinstance(parsed_response, AgentResponse) assert parsed_response.status_code == 404 assert parsed_response.success is False @@ -189,11 +222,15 @@ def test_extract_content_openai_format(sample_custom_agent): } ] } - extracted_content = sample_custom_agent._extract_content(openai_response) + extracted_content = sample_custom_agent._extract_content( + openai_response + ) assert extracted_content == "This is the response content" logger.debug("OpenAI format content extracted correctly") except Exception as e: - logger.error(f"Failed to test extract_content OpenAI format: {e}") + logger.error( + f"Failed to test extract_content OpenAI format: {e}" + ) raise @@ -202,25 +239,33 @@ def test_extract_content_anthropic_format(sample_custom_agent): anthropic_response = { "content": [ {"text": "First part "}, - {"text": "second part"} + {"text": "second part"}, ] } - extracted_content = sample_custom_agent._extract_content(anthropic_response) + extracted_content = sample_custom_agent._extract_content( + anthropic_response + ) assert extracted_content == "First part second part" logger.debug("Anthropic format content extracted correctly") except Exception as e: - logger.error(f"Failed to test extract_content Anthropic 
format: {e}") + logger.error( + f"Failed to test extract_content Anthropic format: {e}" + ) raise def test_extract_content_generic_format(sample_custom_agent): try: generic_response = {"text": "Generic response text"} - extracted_content = sample_custom_agent._extract_content(generic_response) + extracted_content = sample_custom_agent._extract_content( + generic_response + ) assert extracted_content == "Generic response text" logger.debug("Generic format content extracted correctly") except Exception as e: - logger.error(f"Failed to test extract_content generic format: {e}") + logger.error( + f"Failed to test extract_content generic format: {e}" + ) raise @@ -229,14 +274,18 @@ def test_run_success(mock_client_class, sample_custom_agent): try: mock_response = Mock() mock_response.status_code = 200 - mock_response.text = '{"choices": [{"message": {"content": "Success"}}]}' + mock_response.text = ( + '{"choices": [{"message": {"content": "Success"}}]}' + ) mock_response.json.return_value = { "choices": [{"message": {"content": "Success"}}] } mock_response.headers = {"content-type": "application/json"} mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__enter__ = Mock( + return_value=mock_client_instance + ) mock_client_instance.__exit__ = Mock(return_value=None) mock_client_instance.post.return_value = mock_response mock_client_class.return_value = mock_client_instance @@ -259,7 +308,9 @@ def test_run_error_response(mock_client_class, sample_custom_agent): mock_response.text = "Internal Server Error" mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__enter__ = Mock( + return_value=mock_client_instance + ) mock_client_instance.__exit__ = Mock(return_value=None) mock_client_instance.post.return_value = mock_response mock_client_class.return_value = mock_client_instance @@ -280,9 +331,13 @@ def 
test_run_request_error(mock_client_class, sample_custom_agent): import httpx mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__enter__ = Mock( + return_value=mock_client_instance + ) mock_client_instance.__exit__ = Mock(return_value=None) - mock_client_instance.post.side_effect = httpx.RequestError("Connection failed") + mock_client_instance.post.side_effect = httpx.RequestError( + "Connection failed" + ) mock_client_class.return_value = mock_client_instance test_payload = {"message": "test"} @@ -295,23 +350,33 @@ def test_run_request_error(mock_client_class, sample_custom_agent): raise -@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") +@pytest.mark.skipif( + not ASYNC_AVAILABLE, reason="pytest-asyncio not installed" +) @pytest.mark.asyncio @patch("swarms.structs.custom_agent.httpx.AsyncClient") -async def test_run_async_success(mock_async_client_class, sample_custom_agent): +async def test_run_async_success( + mock_async_client_class, sample_custom_agent +): try: mock_response = Mock() mock_response.status_code = 200 - mock_response.text = '{"content": [{"text": "Async Success"}]}' + mock_response.text = ( + '{"content": [{"text": "Async Success"}]}' + ) mock_response.json.return_value = { "content": [{"text": "Async Success"}] } mock_response.headers = {"content-type": "application/json"} mock_client_instance = AsyncMock() - mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aenter__ = AsyncMock( + return_value=mock_client_instance + ) mock_client_instance.__aexit__ = AsyncMock(return_value=None) - mock_client_instance.post = AsyncMock(return_value=mock_response) + mock_client_instance.post = AsyncMock( + return_value=mock_response + ) mock_async_client_class.return_value = mock_client_instance test_payload = {"message": "test"} @@ -324,19 +389,27 @@ async def 
test_run_async_success(mock_async_client_class, sample_custom_agent): raise -@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") +@pytest.mark.skipif( + not ASYNC_AVAILABLE, reason="pytest-asyncio not installed" +) @pytest.mark.asyncio @patch("swarms.structs.custom_agent.httpx.AsyncClient") -async def test_run_async_error_response(mock_async_client_class, sample_custom_agent): +async def test_run_async_error_response( + mock_async_client_class, sample_custom_agent +): try: mock_response = Mock() mock_response.status_code = 400 mock_response.text = "Bad Request" mock_client_instance = AsyncMock() - mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aenter__ = AsyncMock( + return_value=mock_client_instance + ) mock_client_instance.__aexit__ = AsyncMock(return_value=None) - mock_client_instance.post = AsyncMock(return_value=mock_response) + mock_client_instance.post = AsyncMock( + return_value=mock_response + ) mock_async_client_class.return_value = mock_client_instance test_payload = {"message": "test"} @@ -367,4 +440,3 @@ def test_agent_response_dataclass(): except Exception as e: logger.error(f"Failed to test AgentResponse dataclass: {e}") raise - diff --git a/tests/structs/test_deep_discussion.py b/tests/structs/test_deep_discussion.py index f83a00c5..76aecd00 100644 --- a/tests/structs/test_deep_discussion.py +++ b/tests/structs/test_deep_discussion.py @@ -6,8 +6,10 @@ from swarms.structs.agent import Agent def create_function_agent(name: str, system_prompt: str = None): if system_prompt is None: - system_prompt = f"You are {name}. Provide thoughtful responses." - + system_prompt = ( + f"You are {name}. Provide thoughtful responses." 
+ ) + agent = Agent( agent_name=name, agent_description=f"Test agent {name}", @@ -23,11 +25,11 @@ def create_function_agent(name: str, system_prompt: str = None): def sample_agents(): agent1 = create_function_agent( "Debater1", - "You are a debater who argues for the affirmative position. Be concise and direct." + "You are a debater who argues for the affirmative position. Be concise and direct.", ) agent2 = create_function_agent( "Debater2", - "You are a debater who argues for the negative position. Be concise and direct." + "You are a debater who argues for the negative position. Be concise and direct.", ) return [agent1, agent2] @@ -64,7 +66,7 @@ def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): assert result is not None assert isinstance(result, str) assert len(result) > 0 - + result_list = one_on_one_debate( max_loops=max_loops, task=sample_task, @@ -80,7 +82,9 @@ def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): raise -def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): +def test_one_on_one_debate_agent_alternation( + sample_agents, sample_task +): try: max_loops = 4 result = one_on_one_debate( @@ -92,7 +96,7 @@ def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == max_loops - + agent_names = [] for msg in result: if isinstance(msg, dict): @@ -105,8 +109,10 @@ def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): assert agent_names is not None assert len(agent_names) >= 0 if len(agent_names) > 0: - assert "Debater1" in agent_names or "Debater2" in agent_names - + assert ( + "Debater1" in agent_names or "Debater2" in agent_names + ) + if len(agent_names) > 0: debater1_count = agent_names.count("Debater1") debater2_count = agent_names.count("Debater2") @@ -137,7 +143,9 @@ def test_one_on_one_debate_with_image(sample_agents): raise -def 
test_one_on_one_debate_custom_output_types(sample_agents, sample_task): +def test_one_on_one_debate_custom_output_types( + sample_agents, sample_task +): try: output_type_checks = { "str": str, @@ -163,7 +171,9 @@ def test_one_on_one_debate_custom_output_types(sample_agents, sample_task): raise -def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): +def test_one_on_one_debate_list_output_structure( + sample_agents, sample_task +): try: result = one_on_one_debate( max_loops=2, @@ -174,7 +184,7 @@ def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + for message in result: assert message is not None assert isinstance(message, (str, dict)) @@ -191,7 +201,9 @@ def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): def test_one_on_one_debate_too_few_agents(sample_task): try: single_agent = [create_function_agent("SoloAgent")] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -210,7 +222,9 @@ def test_one_on_one_debate_too_many_agents(sample_task): create_function_agent("Agent2"), create_function_agent("Agent3"), ] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -225,7 +239,9 @@ def test_one_on_one_debate_too_many_agents(sample_task): def test_one_on_one_debate_empty_agents(sample_task): try: empty_agents = [] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -265,7 +281,9 @@ def test_one_on_one_debate_none_task(sample_agents): raise -def 
test_one_on_one_debate_invalid_output_type(sample_agents, sample_task): +def test_one_on_one_debate_invalid_output_type( + sample_agents, sample_task +): try: with pytest.raises((ValueError, TypeError)): one_on_one_debate( @@ -289,7 +307,7 @@ def test_one_on_one_debate_zero_loops(sample_agents, sample_task): ) assert result is not None assert isinstance(result, str) - + result_list = one_on_one_debate( max_loops=0, task=sample_task, @@ -327,7 +345,9 @@ def test_one_on_one_debate_different_topics(sample_agents): raise -def test_one_on_one_debate_long_conversation(sample_agents, sample_task): +def test_one_on_one_debate_long_conversation( + sample_agents, sample_task +): try: max_loops = 5 result = one_on_one_debate( @@ -349,11 +369,11 @@ def test_one_on_one_debate_different_agent_personalities(): try: agent1 = create_function_agent( "Optimist", - "You are an optimist. Always see the positive side. Be concise." + "You are an optimist. Always see the positive side. Be concise.", ) agent2 = create_function_agent( "Pessimist", - "You are a pessimist. Always see the negative side. Be concise." + "You are a pessimist. Always see the negative side. Be concise.", ) agents = [agent1, agent2] task = "What is the future of AI?" 
@@ -366,7 +386,7 @@ def test_one_on_one_debate_different_agent_personalities(): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + agent_names = [] for msg in result: if isinstance(msg, dict): @@ -379,14 +399,19 @@ def test_one_on_one_debate_different_agent_personalities(): assert agent_names is not None assert len(agent_names) >= 0 if len(agent_names) > 0: - assert "Optimist" in agent_names or "Pessimist" in agent_names + assert ( + "Optimist" in agent_names + or "Pessimist" in agent_names + ) logger.info("Different agent personalities test passed") except Exception as e: logger.error(f"Failed to test different personalities: {e}") raise -def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, sample_task): +def test_one_on_one_debate_conversation_length_matches_loops( + sample_agents, sample_task +): try: for max_loops in [1, 2, 3, 4]: result = one_on_one_debate( @@ -404,7 +429,9 @@ def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, samp raise -def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): +def test_one_on_one_debate_both_agents_participate( + sample_agents, sample_task +): try: result = one_on_one_debate( max_loops=2, @@ -415,7 +442,7 @@ def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + roles = [] for msg in result: if isinstance(msg, dict) and "role" in msg: diff --git a/tests/structs/test_graph_workflow.py b/tests/structs/test_graph_workflow.py new file mode 100644 index 00000000..a00eecb0 --- /dev/null +++ b/tests/structs/test_graph_workflow.py @@ -0,0 +1,552 @@ +import pytest +from swarms.structs.graph_workflow import ( + GraphWorkflow, + Node, + NodeType, +) +from swarms.structs.agent import Agent + +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + + +def 
create_test_agent(name: str, description: str = None) -> Agent: + """Create a real agent for testing""" + if description is None: + description = f"Test agent for {name} operations" + + return Agent( + agent_name=name, + agent_description=description, + model_name="gpt-4o-mini", + verbose=False, + print_on=False, + max_loops=1, + ) + + +def test_graph_workflow_basic_node_creation(): + """Test basic GraphWorkflow node creation with real agents""" + # Test basic node creation + agent = create_test_agent( + "TestAgent", "Test agent for node creation" + ) + node = Node.from_agent(agent) + assert node.id == "TestAgent" + assert node.type == NodeType.AGENT + assert node.agent == agent + + # Test node with custom id + node2 = Node(id="CustomID", type=NodeType.AGENT, agent=agent) + assert node2.id == "CustomID" + + +def test_graph_workflow_multi_agent_collaboration(): + """Test GraphWorkflow with multiple agents in a collaboration scenario""" + # Create specialized agents for a business analysis workflow + market_researcher = create_test_agent( + "Market-Researcher", + "Specialist in market analysis and trend identification", + ) + + data_analyst = create_test_agent( + "Data-Analyst", + "Expert in data processing and statistical analysis", + ) + + strategy_consultant = create_test_agent( + "Strategy-Consultant", + "Senior consultant for strategic planning and recommendations", + ) + + # Create workflow with linear execution path + workflow = GraphWorkflow(name="Business-Analysis-Workflow") + workflow.add_node(market_researcher) + workflow.add_node(data_analyst) + workflow.add_node(strategy_consultant) + + # Add edges to define execution order + workflow.add_edge("Market-Researcher", "Data-Analyst") + workflow.add_edge("Data-Analyst", "Strategy-Consultant") + + # Test workflow execution + result = workflow.run( + "Analyze market opportunities for AI in healthcare" + ) + assert result is not None + + +def test_graph_workflow_parallel_execution(): + """Test GraphWorkflow with 
parallel execution paths""" + # Create agents for parallel analysis + technical_analyst = create_test_agent( + "Technical-Analyst", + "Technical feasibility and implementation analysis", + ) + + market_analyst = create_test_agent( + "Market-Analyst", + "Market positioning and competitive analysis", + ) + + financial_analyst = create_test_agent( + "Financial-Analyst", "Financial modeling and ROI analysis" + ) + + risk_assessor = create_test_agent( + "Risk-Assessor", "Risk assessment and mitigation planning" + ) + + # Create workflow with parallel execution + workflow = GraphWorkflow(name="Parallel-Analysis-Workflow") + workflow.add_node(technical_analyst) + workflow.add_node(market_analyst) + workflow.add_node(financial_analyst) + workflow.add_node(risk_assessor) + + # Add edges for fan-out execution (one to many) + workflow.add_edges_from_source( + "Technical-Analyst", + ["Market-Analyst", "Financial-Analyst", "Risk-Assessor"], + ) + + # Test parallel execution + result = workflow.run( + "Evaluate feasibility of launching a new fintech platform" + ) + assert result is not None + + +def test_graph_workflow_complex_topology(): + """Test GraphWorkflow with complex node topology""" + # Create agents for a comprehensive product development workflow + product_manager = create_test_agent( + "Product-Manager", "Product strategy and roadmap management" + ) + + ux_designer = create_test_agent( + "UX-Designer", "User experience design and research" + ) + + backend_developer = create_test_agent( + "Backend-Developer", + "Backend system architecture and development", + ) + + frontend_developer = create_test_agent( + "Frontend-Developer", + "Frontend interface and user interaction development", + ) + + qa_engineer = create_test_agent( + "QA-Engineer", "Quality assurance and testing specialist" + ) + + devops_engineer = create_test_agent( + "DevOps-Engineer", "Deployment and infrastructure management" + ) + + # Create workflow with complex dependencies + workflow = 
GraphWorkflow(name="Product-Development-Workflow") + workflow.add_node(product_manager) + workflow.add_node(ux_designer) + workflow.add_node(backend_developer) + workflow.add_node(frontend_developer) + workflow.add_node(qa_engineer) + workflow.add_node(devops_engineer) + + # Define complex execution topology + workflow.add_edge("Product-Manager", "UX-Designer") + workflow.add_edge("UX-Designer", "Frontend-Developer") + workflow.add_edge("Product-Manager", "Backend-Developer") + workflow.add_edge("Backend-Developer", "QA-Engineer") + workflow.add_edge("Frontend-Developer", "QA-Engineer") + workflow.add_edge("QA-Engineer", "DevOps-Engineer") + + # Test complex workflow execution + result = workflow.run( + "Develop a comprehensive e-commerce platform with AI recommendations" + ) + assert result is not None + + +def test_graph_workflow_error_handling(): + """Test GraphWorkflow error handling and validation""" + # Test with empty workflow + workflow = GraphWorkflow() + result = workflow.run("Test task") + # Empty workflow should handle gracefully + assert result is not None + + # Test workflow compilation and caching + researcher = create_test_agent( + "Researcher", "Research specialist" + ) + workflow.add_node(researcher) + + # First run should compile + result1 = workflow.run("Research task") + assert result1 is not None + + # Second run should use cached compilation + result2 = workflow.run("Another research task") + assert result2 is not None + + +def test_graph_workflow_node_metadata(): + """Test GraphWorkflow with node metadata""" + # Create agents with different priorities and requirements + high_priority_agent = create_test_agent( + "High-Priority-Analyst", "High priority analysis specialist" + ) + + standard_agent = create_test_agent( + "Standard-Analyst", "Standard analysis agent" + ) + + # Create workflow and add nodes with metadata + workflow = GraphWorkflow(name="Metadata-Workflow") + workflow.add_node( + high_priority_agent, + metadata={"priority": "high", 
"timeout": 60}, + ) + workflow.add_node( + standard_agent, metadata={"priority": "normal", "timeout": 30} + ) + + # Add execution dependency + workflow.add_edge("High-Priority-Analyst", "Standard-Analyst") + + # Test execution with metadata + result = workflow.run( + "Analyze business requirements with different priorities" + ) + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_basic(backend): + """Test GraphWorkflow basic functionality with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow = GraphWorkflow( + name=f"Backend-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + assert len(workflow.nodes) == 2 + assert len(workflow.edges) == 1 + + result = workflow.run("Test task") + assert result is not None + assert "Agent1" in result + assert "Agent2" in result + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_parallel_execution(backend): + """Test parallel execution with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + coordinator = create_test_agent( + "Coordinator", "Coordinates tasks" + ) + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + + workflow = GraphWorkflow( + name=f"Parallel-Test-{backend}", backend=backend + ) + workflow.add_node(coordinator) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + + workflow.add_edges_from_source( + coordinator, [analyst1, analyst2, analyst3] + ) + + workflow.compile() + assert 
len(workflow._sorted_layers) >= 1 + assert ( + len(workflow._sorted_layers[0]) == 1 + ) # Coordinator in first layer + + result = workflow.run("Analyze data in parallel") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_fan_in_pattern(backend): + """Test fan-in pattern with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + synthesizer = create_test_agent( + "Synthesizer", "Synthesizes results" + ) + + workflow = GraphWorkflow( + name=f"FanIn-Test-{backend}", backend=backend + ) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + workflow.add_node(synthesizer) + + workflow.add_edges_to_target( + [analyst1, analyst2, analyst3], synthesizer + ) + + workflow.compile() + assert len(workflow._sorted_layers) >= 2 + assert synthesizer.agent_name in workflow.end_points + + result = workflow.run("Synthesize multiple analyses") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_parallel_chain(backend): + """Test parallel chain pattern with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + collector1 = create_test_agent("Collector1", "First collector") + collector2 = create_test_agent("Collector2", "Second collector") + processor1 = create_test_agent("Processor1", "First processor") + processor2 = create_test_agent("Processor2", "Second processor") + + workflow = GraphWorkflow( + name=f"ParallelChain-Test-{backend}", backend=backend + ) + workflow.add_node(collector1) + workflow.add_node(collector2) + workflow.add_node(processor1) + workflow.add_node(processor2) + + 
workflow.add_parallel_chain( + [collector1, collector2], [processor1, processor2] + ) + + workflow.compile() + assert len(workflow.edges) == 4 # 2x2 = 4 edges + + result = workflow.run("Process data from multiple collectors") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_complex_topology(backend): + """Test complex topology with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") for i in range(5) + ] + + workflow = GraphWorkflow( + name=f"Complex-Topology-{backend}", backend=backend + ) + for agent in agents: + workflow.add_node(agent) + + workflow.add_edge(agents[0], agents[1]) + workflow.add_edge(agents[0], agents[2]) + workflow.add_edge(agents[1], agents[3]) + workflow.add_edge(agents[2], agents[3]) + workflow.add_edge(agents[3], agents[4]) + + workflow.compile() + assert len(workflow._sorted_layers) >= 3 + + result = workflow.run("Execute complex workflow") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_validation(backend): + """Test workflow validation with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + isolated = create_test_agent("Isolated", "Isolated agent") + + workflow = GraphWorkflow( + name=f"Validation-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(isolated) + workflow.add_edge(agent1, agent2) + + validation = workflow.validate(auto_fix=False) + assert isinstance(validation, dict) + assert "is_valid" in validation + + validation_fixed = workflow.validate(auto_fix=True) + assert isinstance(validation_fixed, dict) + + 
+@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_entry_end_points(backend): + """Test entry and end points with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "Entry agent") + agent2 = create_test_agent("Agent2", "Middle agent") + agent3 = create_test_agent("Agent3", "End agent") + + workflow = GraphWorkflow( + name=f"EntryEnd-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + workflow.auto_set_entry_points() + workflow.auto_set_end_points() + + assert agent1.agent_name in workflow.entry_points + assert agent3.agent_name in workflow.end_points + + +def test_graph_workflow_rustworkx_specific(): + """Test rustworkx-specific features""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow = GraphWorkflow( + name="Rustworkx-Specific-Test", backend="rustworkx" + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + assert hasattr(workflow.graph_backend, "_node_id_to_index") + assert hasattr(workflow.graph_backend, "_index_to_node_id") + + workflow.compile() + assert len(workflow._sorted_layers) == 3 + + predecessors = list( + workflow.graph_backend.predecessors(agent2.agent_name) + ) + assert agent1.agent_name in predecessors + + descendants = workflow.graph_backend.descendants( + agent1.agent_name + ) + assert agent2.agent_name in descendants + assert agent3.agent_name in descendants + + 
result = workflow.run("Test rustworkx backend") + assert result is not None + + +def test_graph_workflow_rustworkx_large_scale(): + """Test rustworkx with larger workflow""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(10) + ] + + workflow = GraphWorkflow( + name="Rustworkx-Large-Scale", backend="rustworkx" + ) + for agent in agents: + workflow.add_node(agent) + + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + assert len(workflow._sorted_layers) == 10 + + result = workflow.run("Test large scale workflow") + assert result is not None + assert len(result) == 10 + + +def test_graph_workflow_rustworkx_agent_objects(): + """Test rustworkx with Agent objects directly in edges""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow = GraphWorkflow( + name="Rustworkx-Agent-Objects", backend="rustworkx" + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edges_from_source(agent1, [agent2, agent3]) + workflow.add_edges_to_target([agent2, agent3], agent1) + + workflow.compile() + assert len(workflow.edges) == 4 + + result = workflow.run("Test agent objects in edges") + assert result is not None + + +def test_graph_workflow_backend_fallback(): + """Test backend fallback when rustworkx unavailable""" + workflow = GraphWorkflow( + name="Fallback-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Test agent") + workflow.add_node(agent) + + if not RUSTWORKX_AVAILABLE: + assert ( + workflow.graph_backend.__class__.__name__ + == "NetworkXBackend" + ) + else: + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + + +if __name__ == 
"__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_graph_workflow_comprehensive.py b/tests/structs/test_graph_workflow_comprehensive.py deleted file mode 100644 index 5cb6a4a6..00000000 --- a/tests/structs/test_graph_workflow_comprehensive.py +++ /dev/null @@ -1,225 +0,0 @@ -import pytest -from swarms.structs.graph_workflow import ( - GraphWorkflow, - Node, - NodeType, -) -from swarms.structs.agent import Agent - - -def create_test_agent(name: str, description: str = None) -> Agent: - """Create a real agent for testing""" - if description is None: - description = f"Test agent for {name} operations" - - return Agent( - agent_name=name, - agent_description=description, - model_name="gpt-4o-mini", - verbose=False, - print_on=False, - max_loops=1, - ) - - -def test_graph_workflow_basic_node_creation(): - """Test basic GraphWorkflow node creation with real agents""" - # Test basic node creation - agent = create_test_agent( - "TestAgent", "Test agent for node creation" - ) - node = Node.from_agent(agent) - assert node.id == "TestAgent" - assert node.type == NodeType.AGENT - assert node.agent == agent - - # Test node with custom id - node2 = Node(id="CustomID", type=NodeType.AGENT, agent=agent) - assert node2.id == "CustomID" - - -def test_graph_workflow_multi_agent_collaboration(): - """Test GraphWorkflow with multiple agents in a collaboration scenario""" - # Create specialized agents for a business analysis workflow - market_researcher = create_test_agent( - "Market-Researcher", - "Specialist in market analysis and trend identification", - ) - - data_analyst = create_test_agent( - "Data-Analyst", - "Expert in data processing and statistical analysis", - ) - - strategy_consultant = create_test_agent( - "Strategy-Consultant", - "Senior consultant for strategic planning and recommendations", - ) - - # Create workflow with linear execution path - workflow = GraphWorkflow(name="Business-Analysis-Workflow") - workflow.add_node(market_researcher) - 
workflow.add_node(data_analyst) - workflow.add_node(strategy_consultant) - - # Add edges to define execution order - workflow.add_edge("Market-Researcher", "Data-Analyst") - workflow.add_edge("Data-Analyst", "Strategy-Consultant") - - # Test workflow execution - result = workflow.run( - "Analyze market opportunities for AI in healthcare" - ) - assert result is not None - - -def test_graph_workflow_parallel_execution(): - """Test GraphWorkflow with parallel execution paths""" - # Create agents for parallel analysis - technical_analyst = create_test_agent( - "Technical-Analyst", - "Technical feasibility and implementation analysis", - ) - - market_analyst = create_test_agent( - "Market-Analyst", - "Market positioning and competitive analysis", - ) - - financial_analyst = create_test_agent( - "Financial-Analyst", "Financial modeling and ROI analysis" - ) - - risk_assessor = create_test_agent( - "Risk-Assessor", "Risk assessment and mitigation planning" - ) - - # Create workflow with parallel execution - workflow = GraphWorkflow(name="Parallel-Analysis-Workflow") - workflow.add_node(technical_analyst) - workflow.add_node(market_analyst) - workflow.add_node(financial_analyst) - workflow.add_node(risk_assessor) - - # Add edges for fan-out execution (one to many) - workflow.add_edges_from_source( - "Technical-Analyst", - ["Market-Analyst", "Financial-Analyst", "Risk-Assessor"], - ) - - # Test parallel execution - result = workflow.run( - "Evaluate feasibility of launching a new fintech platform" - ) - assert result is not None - - -def test_graph_workflow_complex_topology(): - """Test GraphWorkflow with complex node topology""" - # Create agents for a comprehensive product development workflow - product_manager = create_test_agent( - "Product-Manager", "Product strategy and roadmap management" - ) - - ux_designer = create_test_agent( - "UX-Designer", "User experience design and research" - ) - - backend_developer = create_test_agent( - "Backend-Developer", - "Backend 
system architecture and development", - ) - - frontend_developer = create_test_agent( - "Frontend-Developer", - "Frontend interface and user interaction development", - ) - - qa_engineer = create_test_agent( - "QA-Engineer", "Quality assurance and testing specialist" - ) - - devops_engineer = create_test_agent( - "DevOps-Engineer", "Deployment and infrastructure management" - ) - - # Create workflow with complex dependencies - workflow = GraphWorkflow(name="Product-Development-Workflow") - workflow.add_node(product_manager) - workflow.add_node(ux_designer) - workflow.add_node(backend_developer) - workflow.add_node(frontend_developer) - workflow.add_node(qa_engineer) - workflow.add_node(devops_engineer) - - # Define complex execution topology - workflow.add_edge("Product-Manager", "UX-Designer") - workflow.add_edge("UX-Designer", "Frontend-Developer") - workflow.add_edge("Product-Manager", "Backend-Developer") - workflow.add_edge("Backend-Developer", "QA-Engineer") - workflow.add_edge("Frontend-Developer", "QA-Engineer") - workflow.add_edge("QA-Engineer", "DevOps-Engineer") - - # Test complex workflow execution - result = workflow.run( - "Develop a comprehensive e-commerce platform with AI recommendations" - ) - assert result is not None - - -def test_graph_workflow_error_handling(): - """Test GraphWorkflow error handling and validation""" - # Test with empty workflow - workflow = GraphWorkflow() - result = workflow.run("Test task") - # Empty workflow should handle gracefully - assert result is not None - - # Test workflow compilation and caching - researcher = create_test_agent( - "Researcher", "Research specialist" - ) - workflow.add_node(researcher) - - # First run should compile - result1 = workflow.run("Research task") - assert result1 is not None - - # Second run should use cached compilation - result2 = workflow.run("Another research task") - assert result2 is not None - - -def test_graph_workflow_node_metadata(): - """Test GraphWorkflow with node metadata""" 
- # Create agents with different priorities and requirements - high_priority_agent = create_test_agent( - "High-Priority-Analyst", "High priority analysis specialist" - ) - - standard_agent = create_test_agent( - "Standard-Analyst", "Standard analysis agent" - ) - - # Create workflow and add nodes with metadata - workflow = GraphWorkflow(name="Metadata-Workflow") - workflow.add_node( - high_priority_agent, - metadata={"priority": "high", "timeout": 60}, - ) - workflow.add_node( - standard_agent, metadata={"priority": "normal", "timeout": 30} - ) - - # Add execution dependency - workflow.add_edge("High-Priority-Analyst", "Standard-Analyst") - - # Test execution with metadata - result = workflow.run( - "Analyze business requirements with different priorities" - ) - assert result is not None - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_multi_agent_debate.py b/tests/structs/test_multi_agent_debate.py index 12737b3b..806a94c6 100644 --- a/tests/structs/test_multi_agent_debate.py +++ b/tests/structs/test_multi_agent_debate.py @@ -18,8 +18,10 @@ from swarms.structs.agent import Agent def create_function_agent(name: str, system_prompt: str = None): if system_prompt is None: - system_prompt = f"You are {name}. Provide concise and direct responses." - + system_prompt = ( + f"You are {name}. Provide concise and direct responses." + ) + agent = Agent( agent_name=name, agent_description=f"Test agent {name}", @@ -34,12 +36,10 @@ def create_function_agent(name: str, system_prompt: str = None): @pytest.fixture def sample_two_agents(): agent1 = create_function_agent( - "Agent1", - "You are Agent1. Provide concise responses." + "Agent1", "You are Agent1. Provide concise responses." ) agent2 = create_function_agent( - "Agent2", - "You are Agent2. Provide concise responses." + "Agent2", "You are Agent2. Provide concise responses." 
) return [agent1, agent2] @@ -71,7 +71,9 @@ def test_one_on_one_debate_initialization(sample_two_agents): assert debate.output_type == "str-all-except-first" logger.info("OneOnOneDebate initialization test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate initialization: {e}") + logger.error( + f"Failed to test OneOnOneDebate initialization: {e}" + ) raise @@ -95,7 +97,9 @@ def test_one_on_one_debate_run(sample_two_agents, sample_task): raise -def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_task): +def test_one_on_one_debate_wrong_number_of_agents( + sample_three_agents, sample_task +): try: debate = OneOnOneDebate( max_loops=2, @@ -104,13 +108,19 @@ def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_ta ) with pytest.raises(ValueError, match="exactly two agents"): debate.run(sample_task) - logger.info("OneOnOneDebate wrong number of agents test passed") + logger.info( + "OneOnOneDebate wrong number of agents test passed" + ) except Exception as e: - logger.error(f"Failed to test OneOnOneDebate wrong number of agents: {e}") + logger.error( + f"Failed to test OneOnOneDebate wrong number of agents: {e}" + ) raise -def test_one_on_one_debate_output_types(sample_two_agents, sample_task): +def test_one_on_one_debate_output_types( + sample_two_agents, sample_task +): try: assert sample_two_agents is not None assert sample_task is not None @@ -133,7 +143,9 @@ def test_one_on_one_debate_output_types(sample_two_agents, sample_task): assert isinstance(result, str) logger.info("OneOnOneDebate output types test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate output types: {e}") + logger.error( + f"Failed to test OneOnOneDebate output types: {e}" + ) raise @@ -175,13 +187,19 @@ def test_expert_panel_discussion_initialization(sample_three_agents): assert panel.max_rounds == 2 assert len(panel.agents) == 3 assert panel.moderator is not None - 
logger.info("ExpertPanelDiscussion initialization test passed") + logger.info( + "ExpertPanelDiscussion initialization test passed" + ) except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion initialization: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion initialization: {e}" + ) raise -def test_expert_panel_discussion_run(sample_three_agents, sample_task): +def test_expert_panel_discussion_run( + sample_three_agents, sample_task +): try: moderator = create_function_agent("Moderator") assert moderator is not None @@ -217,15 +235,23 @@ def test_expert_panel_discussion_insufficient_agents(sample_task): output_type="str-all-except-first", ) assert panel is not None - with pytest.raises(ValueError, match="At least two expert agents"): + with pytest.raises( + ValueError, match="At least two expert agents" + ): panel.run(sample_task) - logger.info("ExpertPanelDiscussion insufficient agents test passed") + logger.info( + "ExpertPanelDiscussion insufficient agents test passed" + ) except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion insufficient agents: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion insufficient agents: {e}" + ) raise -def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): +def test_expert_panel_discussion_no_moderator( + sample_three_agents, sample_task +): try: panel = ExpertPanelDiscussion( max_rounds=2, @@ -233,11 +259,15 @@ def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): moderator=None, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="moderator agent is required"): + with pytest.raises( + ValueError, match="moderator agent is required" + ): panel.run(sample_task) logger.info("ExpertPanelDiscussion no moderator test passed") except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion no moderator: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion no moderator: 
{e}" + ) raise @@ -257,7 +287,9 @@ def test_round_table_discussion_initialization(sample_three_agents): assert round_table.facilitator is not None logger.info("RoundTableDiscussion initialization test passed") except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion initialization: {e}") + logger.error( + f"Failed to test RoundTableDiscussion initialization: {e}" + ) raise @@ -292,15 +324,23 @@ def test_round_table_discussion_insufficient_agents(sample_task): facilitator=facilitator, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two participants"): + with pytest.raises( + ValueError, match="At least two participants" + ): round_table.run(sample_task) - logger.info("RoundTableDiscussion insufficient agents test passed") + logger.info( + "RoundTableDiscussion insufficient agents test passed" + ) except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion insufficient agents: {e}") + logger.error( + f"Failed to test RoundTableDiscussion insufficient agents: {e}" + ) raise -def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task): +def test_round_table_discussion_no_facilitator( + sample_three_agents, sample_task +): try: round_table = RoundTableDiscussion( max_cycles=2, @@ -308,11 +348,15 @@ def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task) facilitator=None, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="facilitator agent is required"): + with pytest.raises( + ValueError, match="facilitator agent is required" + ): round_table.run(sample_task) logger.info("RoundTableDiscussion no facilitator test passed") except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion no facilitator: {e}") + logger.error( + f"Failed to test RoundTableDiscussion no facilitator: {e}" + ) raise @@ -338,7 +382,9 @@ def test_interview_series_initialization(): assert interview.follow_up_depth == 1 
logger.info("InterviewSeries initialization test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries initialization: {e}") + logger.error( + f"Failed to test InterviewSeries initialization: {e}" + ) raise @@ -378,11 +424,15 @@ def test_interview_series_no_interviewer(sample_task): follow_up_depth=1, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both interviewer and interviewee"): + with pytest.raises( + ValueError, match="Both interviewer and interviewee" + ): interview.run(sample_task) logger.info("InterviewSeries no interviewer test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries no interviewer: {e}") + logger.error( + f"Failed to test InterviewSeries no interviewer: {e}" + ) raise @@ -396,11 +446,15 @@ def test_interview_series_no_interviewee(sample_task): follow_up_depth=1, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both interviewer and interviewee"): + with pytest.raises( + ValueError, match="Both interviewer and interviewee" + ): interview.run(sample_task) logger.info("InterviewSeries no interviewee test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries no interviewee: {e}") + logger.error( + f"Failed to test InterviewSeries no interviewee: {e}" + ) raise @@ -425,13 +479,18 @@ def test_interview_series_default_questions(sample_task): assert len(result) >= 0 logger.info("InterviewSeries default questions test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries default questions: {e}") + logger.error( + f"Failed to test InterviewSeries default questions: {e}" + ) raise def test_peer_review_process_initialization(): try: - reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + reviewers = [ + create_function_agent("Reviewer1"), + create_function_agent("Reviewer2"), + ] assert reviewers is not None assert len(reviewers) == 2 assert reviewers[0] is 
not None @@ -450,13 +509,18 @@ def test_peer_review_process_initialization(): assert peer_review.review_rounds == 2 logger.info("PeerReviewProcess initialization test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess initialization: {e}") + logger.error( + f"Failed to test PeerReviewProcess initialization: {e}" + ) raise def test_peer_review_process_run(sample_task): try: - reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + reviewers = [ + create_function_agent("Reviewer1"), + create_function_agent("Reviewer2"), + ] assert reviewers is not None assert len(reviewers) == 2 author = create_function_agent("Author") @@ -491,7 +555,9 @@ def test_peer_review_process_no_reviewers(sample_task): peer_review.run(sample_task) logger.info("PeerReviewProcess no reviewers test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess no reviewers: {e}") + logger.error( + f"Failed to test PeerReviewProcess no reviewers: {e}" + ) raise @@ -504,11 +570,15 @@ def test_peer_review_process_no_author(sample_task): review_rounds=2, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="author agent is required"): + with pytest.raises( + ValueError, match="author agent is required" + ): peer_review.run(sample_task) logger.info("PeerReviewProcess no author test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess no author: {e}") + logger.error( + f"Failed to test PeerReviewProcess no author: {e}" + ) raise @@ -529,7 +599,9 @@ def test_mediation_session_initialization(sample_two_agents): assert mediation.max_sessions == 2 logger.info("MediationSession initialization test passed") except Exception as e: - logger.error(f"Failed to test MediationSession initialization: {e}") + logger.error( + f"Failed to test MediationSession initialization: {e}" + ) raise @@ -567,13 +639,19 @@ def test_mediation_session_insufficient_parties(sample_task): ) with 
pytest.raises(ValueError, match="At least two parties"): mediation.run(sample_task) - logger.info("MediationSession insufficient parties test passed") + logger.info( + "MediationSession insufficient parties test passed" + ) except Exception as e: - logger.error(f"Failed to test MediationSession insufficient parties: {e}") + logger.error( + f"Failed to test MediationSession insufficient parties: {e}" + ) raise -def test_mediation_session_no_mediator(sample_two_agents, sample_task): +def test_mediation_session_no_mediator( + sample_two_agents, sample_task +): try: mediation = MediationSession( parties=sample_two_agents, @@ -581,11 +659,15 @@ def test_mediation_session_no_mediator(sample_two_agents, sample_task): max_sessions=2, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="mediator agent is required"): + with pytest.raises( + ValueError, match="mediator agent is required" + ): mediation.run(sample_task) logger.info("MediationSession no mediator test passed") except Exception as e: - logger.error(f"Failed to test MediationSession no mediator: {e}") + logger.error( + f"Failed to test MediationSession no mediator: {e}" + ) raise @@ -608,7 +690,9 @@ def test_brainstorming_session_initialization(sample_three_agents): assert brainstorming.build_on_ideas is True logger.info("BrainstormingSession initialization test passed") except Exception as e: - logger.error(f"Failed to test BrainstormingSession initialization: {e}") + logger.error( + f"Failed to test BrainstormingSession initialization: {e}" + ) raise @@ -646,15 +730,23 @@ def test_brainstorming_session_insufficient_participants(sample_task): build_on_ideas=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two participants"): + with pytest.raises( + ValueError, match="At least two participants" + ): brainstorming.run(sample_task) - logger.info("BrainstormingSession insufficient participants test passed") + logger.info( + "BrainstormingSession 
insufficient participants test passed" + ) except Exception as e: - logger.error(f"Failed to test BrainstormingSession insufficient participants: {e}") + logger.error( + f"Failed to test BrainstormingSession insufficient participants: {e}" + ) raise -def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): +def test_brainstorming_session_no_facilitator( + sample_three_agents, sample_task +): try: brainstorming = BrainstormingSession( participants=sample_three_agents, @@ -663,11 +755,15 @@ def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): build_on_ideas=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="facilitator agent is required"): + with pytest.raises( + ValueError, match="facilitator agent is required" + ): brainstorming.run(sample_task) logger.info("BrainstormingSession no facilitator test passed") except Exception as e: - logger.error(f"Failed to test BrainstormingSession no facilitator: {e}") + logger.error( + f"Failed to test BrainstormingSession no facilitator: {e}" + ) raise @@ -699,7 +795,9 @@ def test_trial_simulation_initialization(): assert trial.phases == ["opening", "closing"] logger.info("TrialSimulation initialization test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation initialization: {e}") + logger.error( + f"Failed to test TrialSimulation initialization: {e}" + ) raise @@ -746,7 +844,9 @@ def test_trial_simulation_no_prosecution(sample_task): trial.run(sample_task) logger.info("TrialSimulation no prosecution test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation no prosecution: {e}") + logger.error( + f"Failed to test TrialSimulation no prosecution: {e}" + ) raise @@ -774,7 +874,9 @@ def test_trial_simulation_default_phases(sample_task): assert len(result) >= 0 logger.info("TrialSimulation default phases test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation default 
phases: {e}") + logger.error( + f"Failed to test TrialSimulation default phases: {e}" + ) raise @@ -797,7 +899,9 @@ def test_council_meeting_initialization(sample_three_agents): assert council.require_consensus is False logger.info("CouncilMeeting initialization test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting initialization: {e}") + logger.error( + f"Failed to test CouncilMeeting initialization: {e}" + ) raise @@ -835,15 +939,21 @@ def test_council_meeting_insufficient_members(sample_task): require_consensus=False, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two council members"): + with pytest.raises( + ValueError, match="At least two council members" + ): council.run(sample_task) logger.info("CouncilMeeting insufficient members test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting insufficient members: {e}") + logger.error( + f"Failed to test CouncilMeeting insufficient members: {e}" + ) raise -def test_council_meeting_no_chairperson(sample_three_agents, sample_task): +def test_council_meeting_no_chairperson( + sample_three_agents, sample_task +): try: council = CouncilMeeting( council_members=sample_three_agents, @@ -852,11 +962,15 @@ def test_council_meeting_no_chairperson(sample_three_agents, sample_task): require_consensus=False, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="chairperson agent is required"): + with pytest.raises( + ValueError, match="chairperson agent is required" + ): council.run(sample_task) logger.info("CouncilMeeting no chairperson test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting no chairperson: {e}") + logger.error( + f"Failed to test CouncilMeeting no chairperson: {e}" + ) raise @@ -880,7 +994,9 @@ def test_mentorship_session_initialization(): assert mentorship.include_feedback is True logger.info("MentorshipSession initialization test passed") except Exception as 
e: - logger.error(f"Failed to test MentorshipSession initialization: {e}") + logger.error( + f"Failed to test MentorshipSession initialization: {e}" + ) raise @@ -918,11 +1034,15 @@ def test_mentorship_session_no_mentor(sample_task): include_feedback=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both mentor and mentee"): + with pytest.raises( + ValueError, match="Both mentor and mentee" + ): mentorship.run(sample_task) logger.info("MentorshipSession no mentor test passed") except Exception as e: - logger.error(f"Failed to test MentorshipSession no mentor: {e}") + logger.error( + f"Failed to test MentorshipSession no mentor: {e}" + ) raise @@ -936,11 +1056,15 @@ def test_mentorship_session_no_mentee(sample_task): include_feedback=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both mentor and mentee"): + with pytest.raises( + ValueError, match="Both mentor and mentee" + ): mentorship.run(sample_task) logger.info("MentorshipSession no mentee test passed") except Exception as e: - logger.error(f"Failed to test MentorshipSession no mentee: {e}") + logger.error( + f"Failed to test MentorshipSession no mentee: {e}" + ) raise @@ -963,7 +1087,9 @@ def test_negotiation_session_initialization(sample_two_agents): assert negotiation.include_concessions is True logger.info("NegotiationSession initialization test passed") except Exception as e: - logger.error(f"Failed to test NegotiationSession initialization: {e}") + logger.error( + f"Failed to test NegotiationSession initialization: {e}" + ) raise @@ -1003,13 +1129,19 @@ def test_negotiation_session_insufficient_parties(sample_task): ) with pytest.raises(ValueError, match="At least two parties"): negotiation.run(sample_task) - logger.info("NegotiationSession insufficient parties test passed") + logger.info( + "NegotiationSession insufficient parties test passed" + ) except Exception as e: - logger.error(f"Failed to test NegotiationSession insufficient 
parties: {e}") + logger.error( + f"Failed to test NegotiationSession insufficient parties: {e}" + ) raise -def test_negotiation_session_no_mediator(sample_two_agents, sample_task): +def test_negotiation_session_no_mediator( + sample_two_agents, sample_task +): try: negotiation = NegotiationSession( parties=sample_two_agents, @@ -1018,15 +1150,21 @@ def test_negotiation_session_no_mediator(sample_two_agents, sample_task): include_concessions=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="mediator agent is required"): + with pytest.raises( + ValueError, match="mediator agent is required" + ): negotiation.run(sample_task) logger.info("NegotiationSession no mediator test passed") except Exception as e: - logger.error(f"Failed to test NegotiationSession no mediator: {e}") + logger.error( + f"Failed to test NegotiationSession no mediator: {e}" + ) raise -def test_negotiation_session_without_concessions(sample_two_agents, sample_task): +def test_negotiation_session_without_concessions( + sample_two_agents, sample_task +): try: mediator = create_function_agent("Mediator") assert mediator is not None @@ -1043,13 +1181,19 @@ def test_negotiation_session_without_concessions(sample_two_agents, sample_task) assert result is not None assert isinstance(result, str) assert len(result) >= 0 - logger.info("NegotiationSession without concessions test passed") + logger.info( + "NegotiationSession without concessions test passed" + ) except Exception as e: - logger.error(f"Failed to test NegotiationSession without concessions: {e}") + logger.error( + f"Failed to test NegotiationSession without concessions: {e}" + ) raise -def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): +def test_one_on_one_debate_multiple_loops( + sample_two_agents, sample_task +): try: assert sample_two_agents is not None debate = OneOnOneDebate( @@ -1064,11 +1208,15 @@ def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): assert 
len(result) >= 0 logger.info("OneOnOneDebate multiple loops test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate multiple loops: {e}") + logger.error( + f"Failed to test OneOnOneDebate multiple loops: {e}" + ) raise -def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): +def test_expert_panel_discussion_output_types( + sample_three_agents, sample_task +): try: moderator = create_function_agent("Moderator") assert moderator is not None @@ -1093,5 +1241,7 @@ def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): assert isinstance(result, str) logger.info("ExpertPanelDiscussion output types test passed") except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion output types: {e}") - raise \ No newline at end of file + logger.error( + f"Failed to test ExpertPanelDiscussion output types: {e}" + ) + raise From 54acb0a129a9a8752e388fbcf3b2b347e4d72cb3 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 21 Nov 2025 14:43:22 -0800 Subject: [PATCH 28/42] [FEAT][rustworkx integration into GraphWorkflow] [New Examples] [Update GraphWorkflow docs] --- docs/swarms/structs/graph_workflow.md | 208 +++++++++++++++++++++++--- 1 file changed, 191 insertions(+), 17 deletions(-) diff --git a/docs/swarms/structs/graph_workflow.md b/docs/swarms/structs/graph_workflow.md index ef48d8d0..f0182be3 100644 --- a/docs/swarms/structs/graph_workflow.md +++ b/docs/swarms/structs/graph_workflow.md @@ -12,6 +12,7 @@ Key features: |------------------------|-----------------------------------------------------------------------------------------------| | **Agent-based nodes** | Each node represents an agent that can process tasks | | **Directed graph structure** | Edges define the flow of data between agents | +| **Dual backend support** | Choose between NetworkX (compatibility) or Rustworkx (performance) backends | | **Parallel execution** | Multiple agents can run simultaneously within layers | | 
**Automatic compilation** | Optimizes workflow structure for efficient execution | | **Rich visualization** | Generate visual representations using Graphviz | @@ -25,37 +26,40 @@ graph TB subgraph "GraphWorkflow Architecture" A[GraphWorkflow] --> B[Node Collection] A --> C[Edge Collection] - A --> D[NetworkX Graph] + A --> D[Graph Backend] A --> E[Execution Engine] B --> F[Agent Nodes] C --> G[Directed Edges] - D --> H[Topological Sort] - E --> I[Parallel Execution] - E --> J[Layer Processing] + D --> H[NetworkX Backend] + D --> I[Rustworkx Backend] + D --> J[Topological Sort] + E --> K[Parallel Execution] + E --> L[Layer Processing] subgraph "Node Types" - F --> K[Agent Node] - K --> L[Agent Instance] - K --> M[Node Metadata] + F --> M[Agent Node] + M --> N[Agent Instance] + M --> O[Node Metadata] end subgraph "Edge Types" - G --> N[Simple Edge] - G --> O[Fan-out Edge] - G --> P[Fan-in Edge] - G --> Q[Parallel Chain] + G --> P[Simple Edge] + G --> Q[Fan-out Edge] + G --> R[Fan-in Edge] + G --> S[Parallel Chain] end subgraph "Execution Patterns" - I --> R[Thread Pool] - I --> S[Concurrent Futures] - J --> T[Layer-by-layer] - J --> U[Dependency Resolution] + K --> T[Thread Pool] + K --> U[Concurrent Futures] + L --> V[Layer-by-layer] + L --> W[Dependency Resolution] end end ``` + ## Class Reference | Parameter | Type | Description | Default | @@ -71,6 +75,70 @@ graph TB | `task` | `Optional[str]` | The task to be executed by the workflow | `None` | | `auto_compile` | `bool` | Whether to automatically compile the workflow | `True` | | `verbose` | `bool` | Whether to enable detailed logging | `False` | +| `backend` | `str` | Graph backend to use ("networkx" or "rustworkx") | `"networkx"` | + +## Graph Backends + +GraphWorkflow supports two graph backend implementations, each with different performance characteristics: + +### NetworkX Backend (Default) + +The **NetworkX** backend is the default and most widely compatible option. 
It provides: + +| Feature | Description | +|---------------------|---------------------------------------------------------| +| βœ… Full compatibility | Works out of the box with no additional dependencies | +| βœ… Mature ecosystem | Well-tested and stable | +| βœ… Rich features | Comprehensive graph algorithms and operations | +| βœ… Python-native | Pure Python implementation | + +**Use NetworkX when:** + +- You need maximum compatibility + +- Working with small to medium-sized graphs (< 1000 nodes) + +- You want zero additional dependencies + +### Rustworkx Backend (High Performance) + +The **Rustworkx** backend provides significant performance improvements for large graphs: + +| Feature | Description | +|--------------------|-----------------------------------------------------------------| +| ⚑ High performance| Rust-based implementation for faster operations | +| ⚑ Memory efficient| Optimized for large-scale graphs | +| ⚑ Scalable | Better performance with graphs containing 1000+ nodes | +| ⚑ Same API | Drop-in replacement with identical interface | + +**Use Rustworkx when:** + +- Working with large graphs (1000+ nodes) + +- Performance is critical + +- You can install additional dependencies + +**Installation:** +```bash +pip install rustworkx +``` + +**Note:** If rustworkx is not installed and you specify `backend="rustworkx"`, GraphWorkflow will automatically fall back to NetworkX with a warning. + +### Backend Selection + +Both backends implement the same `GraphBackend` interface, ensuring complete API compatibility. You can switch between backends without changing your code: + +```python +# Use NetworkX (default) +workflow = GraphWorkflow(backend="networkx") + +# Use Rustworkx for better performance +workflow = GraphWorkflow(backend="rustworkx") +``` + +The backend choice is transparent to the rest of the API - all methods work identically regardless of which backend is used. 
### Core Methods @@ -455,7 +523,7 @@ Constructs a workflow from a list of agents and connections. | `entry_points` | `List[str]` | List of entry point node IDs | `None` | | `end_points` | `List[str]` | List of end point node IDs | `None` | | `task` | `str` | Task to be executed by the workflow | `None` | -| `**kwargs` | `Any` | Additional keyword arguments | `{}` | +| `**kwargs` | `Any` | Additional keyword arguments (e.g., `backend`, `verbose`, `auto_compile`) | `{}` | **Returns:** @@ -464,6 +532,7 @@ Constructs a workflow from a list of agents and connections. **Example:** ```python +# Using NetworkX backend (default) workflow = GraphWorkflow.from_spec( agents=[agent1, agent2, agent3], edges=[ @@ -473,10 +542,56 @@ workflow = GraphWorkflow.from_spec( ], task="Analyze market data" ) + +# Using Rustworkx backend for better performance +workflow = GraphWorkflow.from_spec( + agents=[agent1, agent2, agent3], + edges=[ + ("agent1", "agent2"), + ("agent2", "agent3"), + ], + task="Analyze market data", + backend="rustworkx" # Specify backend via kwargs +) ``` ## Examples +### Using Rustworkx Backend for Performance + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4", + max_loops=1 +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4", + max_loops=1 +) + +# Build workflow with rustworkx backend for better performance +workflow = GraphWorkflow( + name="High-Performance-Workflow", + backend="rustworkx" # Use rustworkx backend +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_edge("ResearchAgent", "AnalysisAgent") + +# Execute - backend is transparent to the API +results = workflow.run("What are the latest trends in AI?") +print(results) +``` + +**Note:** Make sure to install rustworkx first: `pip install rustworkx` + ### Basic Sequential Workflow ```python @@ -667,6 +782,46 @@ loaded_workflow = 
GraphWorkflow.load_from_file( new_results = loaded_workflow.run("Continue with quantum cryptography analysis") ``` +### Large-Scale Workflow with Rustworkx + +```python +from swarms import Agent, GraphWorkflow + +# Create a large workflow with many agents +# Rustworkx backend provides better performance for large graphs +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + backend="rustworkx", # Use rustworkx for better performance + verbose=True +) + +# Create many agents (e.g., for parallel data processing) +agents = [] +for i in range(50): + agent = Agent( + agent_name=f"Processor{i}", + model_name="gpt-4", + max_loops=1 + ) + agents.append(agent) + workflow.add_node(agent) + +# Create complex interconnections +# Rustworkx handles this efficiently +for i in range(0, 50, 10): + source_agents = [f"Processor{j}" for j in range(i, min(i+10, 50))] + target_agents = [f"Processor{j}" for j in range(i+10, min(i+20, 50))] + if target_agents: + workflow.add_parallel_chain(source_agents, target_agents) + +# Compile and execute +workflow.compile() +status = workflow.get_compilation_status() +print(f"Compiled workflow with {status['cached_layers_count']} layers") + +results = workflow.run("Process large dataset in parallel") +``` + ### Advanced Pattern Detection ```python @@ -770,7 +925,8 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest |-----------------|--------------------------------------------------------------------------------------------------| | **Scalability** | Supports workflows with hundreds of agents through efficient parallel execution | | **Flexibility** | Multiple connection patterns (sequential, fan-out, fan-in, parallel chains) | -| **Performance** | Automatic compilation and optimization for faster execution | +| **Performance** | Automatic compilation and optimization for faster execution; rustworkx backend for large-scale graphs | +| **Backend Choice** | Choose between NetworkX (compatibility) or Rustworkx 
(performance) based on your needs | | **Visualization** | Rich visual representations for workflow understanding and debugging | | **Persistence** | Complete serialization and deserialization capabilities | | **Error Handling** | Comprehensive error handling and recovery mechanisms | @@ -793,10 +949,28 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest |---------------------------------------|------------------------------------------------------------------| | **Use meaningful agent names** | Helps with debugging and visualization | | **Leverage parallel patterns** | Use fan-out and fan-in for better performance | +| **Choose the right backend** | Use rustworkx for large graphs (1000+ nodes), networkx for smaller graphs | | **Compile workflows** | Always compile before execution for optimal performance | | **Monitor execution** | Use verbose mode and status reporting for debugging | | **Save important workflows** | Use serialization for workflow persistence | | **Handle errors gracefully** | Implement proper error handling and recovery | | **Visualize complex workflows** | Use visualization to understand and debug workflows | +### Backend Performance Considerations + +When choosing between NetworkX and Rustworkx backends: + +| Graph Size | Recommended Backend | Reason | +|------------|-------------------|--------| +| < 100 nodes | NetworkX | Minimal overhead, no extra dependencies | +| 100-1000 nodes | NetworkX or Rustworkx | Both perform well, choose based on dependency preferences | +| 1000+ nodes | Rustworkx | Significant performance benefits for large graphs | +| Very large graphs (10k+ nodes) | Rustworkx | Essential for acceptable performance | + +**Performance Tips:** +- Rustworkx provides 2-10x speedup for topological operations on large graphs +- Both backends support the same features and API +- You can switch backends without code changes +- Rustworkx uses less memory for large graphs + The GraphWorkflow system represents 
a significant advancement in multi-agent orchestration, providing the tools needed to build complex, scalable, and maintainable AI workflows. \ No newline at end of file From e5c29609124288f3c9fcad24a2e29678884695b8 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 21 Nov 2025 14:56:12 -0800 Subject: [PATCH 29/42] example guide on graph workflow with agentic patterns --- docs/mkdocs.yml | 1 + .../graphworkflow_rustworkx_patterns.md | 1479 +++++++++++++++++ 2 files changed, 1480 insertions(+) create mode 100644 docs/swarms/examples/graphworkflow_rustworkx_patterns.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 1619374f..5b70d5f6 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -399,6 +399,7 @@ nav: - SwarmRouter Example: "swarms/examples/swarm_router.md" - MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md" - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md" + - Multi-Agentic Patterns with GraphWorkflow: "swarms/examples/graphworkflow_rustworkx_patterns.md" - Mixture of Agents Example: "swarms/examples/moa_example.md" - Unique Swarms: "swarms/examples/unique_swarms.md" - Agents as Tools: "swarms/examples/agents_as_tools.md" diff --git a/docs/swarms/examples/graphworkflow_rustworkx_patterns.md b/docs/swarms/examples/graphworkflow_rustworkx_patterns.md new file mode 100644 index 00000000..5d392c49 --- /dev/null +++ b/docs/swarms/examples/graphworkflow_rustworkx_patterns.md @@ -0,0 +1,1479 @@ +# GraphWorkflow with Rustworkx: Complete Patterns Guide + +A comprehensive guide to implementing various agentic patterns using GraphWorkflow with the rustworkx backend for optimal performance. + +## Table of Contents + +1. [Introduction](#introduction) +2. [Basic Patterns](#basic-patterns) +3. [Hierarchical Patterns](#hierarchical-patterns) +4. [Concurrent/Parallel Patterns](#concurrentparallel-patterns) +5. [Majority Voting Patterns](#majority-voting-patterns) +6. 
[Fan-Out/Fan-In Patterns](#fan-outfan-in-patterns) +7. [Sequential Patterns](#sequential-patterns) +8. [Advanced Patterns](#advanced-patterns) +9. [Performance Optimization](#performance-optimization) + +## Introduction + +GraphWorkflow with rustworkx backend provides a high-performance framework for orchestrating complex multi-agent workflows. This guide demonstrates how to implement various agentic patterns that are commonly used in production systems. + +### Why Rustworkx? + +- **Performance**: 2-10x faster for large graphs (1000+ nodes) +- **Memory Efficiency**: Optimized for large-scale workflows +- **Scalability**: Better performance with complex graph operations +- **API Compatibility**: Drop-in replacement for NetworkX backend + +### Installation + +```bash +pip install rustworkx +``` + +## Basic Patterns + +### Simple Sequential Workflow + +The most basic pattern - agents execute one after another in sequence. + +**Architecture Diagram:** + +```mermaid +graph LR + A[ResearchAgent] --> B[AnalysisAgent] + B --> C[SynthesisAgent] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +synthesis_agent = Agent( + agent_name="SynthesisAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Build sequential workflow +workflow = GraphWorkflow( + name="Sequential-Workflow", + backend="rustworkx", + verbose=True, +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_node(synthesis_agent) + +# Create sequential chain +workflow.add_edge(research_agent, analysis_agent) +workflow.add_edge(analysis_agent, synthesis_agent) + +# Execute +results = workflow.run("Analyze the impact of AI on healthcare") +``` + +**Use Case**: When each agent needs the previous agent's output before proceeding. 
+ +## Hierarchical Patterns + +### Multi-Level Hierarchy + +Hierarchical patterns organize agents into levels, where higher-level agents coordinate lower-level agents. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Executive] --> B[Research-Head] + A --> C[Analysis-Head] + B --> D[Researcher-1] + B --> E[Researcher-2] + C --> F[Analyst-1] + C --> G[Analyst-2] + D --> H[Synthesis-Agent] + E --> H + F --> H + G --> H +``` + +```python +from swarms import Agent, GraphWorkflow + +# Level 1: Executive/Coordinator +executive = Agent( + agent_name="Executive", + agent_description="Coordinates overall strategy", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 2: Department Heads +research_head = Agent( + agent_name="Research-Head", + agent_description="Leads research department", + model_name="gpt-4o-mini", + max_loops=1, +) + +analysis_head = Agent( + agent_name="Analysis-Head", + agent_description="Leads analysis department", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 3: Specialists +researcher_1 = Agent( + agent_name="Researcher-1", + agent_description="Market research specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +researcher_2 = Agent( + agent_name="Researcher-2", + agent_description="Technical research specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_1 = Agent( + agent_name="Analyst-1", + agent_description="Data analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + agent_description="Financial analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 4: Synthesis +synthesis_agent = Agent( + agent_name="Synthesis-Agent", + agent_description="Synthesizes all outputs", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Build hierarchical workflow +workflow = GraphWorkflow( + name="Hierarchical-Workflow", + backend="rustworkx", + verbose=True, +) + +# Add all agents +all_agents = [ + executive, + research_head, + analysis_head, + researcher_1, 
+ researcher_2, + analyst_1, + analyst_2, + synthesis_agent, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Level 1 -> Level 2 +workflow.add_edge(executive, research_head) +workflow.add_edge(executive, analysis_head) + +# Level 2 -> Level 3 +workflow.add_edges_from_source( + research_head, + [researcher_1, researcher_2], +) + +workflow.add_edges_from_source( + analysis_head, + [analyst_1, analyst_2], +) + +# Level 3 -> Level 4 (convergence) +workflow.add_edges_to_target( + [researcher_1, researcher_2, analyst_1, analyst_2], + synthesis_agent, +) + +# Execute +results = workflow.run("Conduct a comprehensive market analysis") +``` + +**Use Case**: Organizational structures, multi-level decision making, hierarchical data processing. + +### Tree Structure Hierarchy + +A tree-like hierarchy where one root agent branches into multiple specialized branches. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Root-Coordinator] --> B[Tech-Branch-Head] + A --> C[Business-Branch-Head] + B --> D[Tech-Specialist-1] + B --> E[Tech-Specialist-2] + C --> F[Business-Specialist-1] + C --> G[Business-Specialist-2] + D --> H[Final-Synthesis] + E --> H + F --> H + G --> H +``` + +```python +from swarms import Agent, GraphWorkflow + +# Root agent +root_coordinator = Agent( + agent_name="Root-Coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 1: Technical Analysis +tech_branch_head = Agent( + agent_name="Tech-Branch-Head", + model_name="gpt-4o-mini", + max_loops=1, +) + +tech_specialist_1 = Agent( + agent_name="Tech-Specialist-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +tech_specialist_2 = Agent( + agent_name="Tech-Specialist-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 2: Business Analysis +business_branch_head = Agent( + agent_name="Business-Branch-Head", + model_name="gpt-4o-mini", + max_loops=1, +) + +business_specialist_1 = Agent( + agent_name="Business-Specialist-1", + model_name="gpt-4o-mini", + max_loops=1, +) + 
+business_specialist_2 = Agent( + agent_name="Business-Specialist-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence point +final_synthesis = Agent( + agent_name="Final-Synthesis", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Tree-Hierarchy-Workflow", + backend="rustworkx", +) + +all_agents = [ + root_coordinator, + tech_branch_head, + tech_specialist_1, + tech_specialist_2, + business_branch_head, + business_specialist_1, + business_specialist_2, + final_synthesis, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Root -> Branch heads +workflow.add_edge(root_coordinator, tech_branch_head) +workflow.add_edge(root_coordinator, business_branch_head) + +# Branch heads -> Specialists +workflow.add_edges_from_source( + tech_branch_head, + [tech_specialist_1, tech_specialist_2], +) + +workflow.add_edges_from_source( + business_branch_head, + [business_specialist_1, business_specialist_2], +) + +# All specialists -> Final synthesis +workflow.add_edges_to_target( + [ + tech_specialist_1, + tech_specialist_2, + business_specialist_1, + business_specialist_2, + ], + final_synthesis, +) + +results = workflow.run("Analyze a technology startup from multiple perspectives") +``` + +## Concurrent/Parallel Patterns + +### Full Parallel Execution + +All agents execute simultaneously without dependencies. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Parallel-Agent-1] --> D[Collector] + B[Parallel-Agent-2] --> D + C[Parallel-Agent-3] --> D +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create independent parallel agents +parallel_agent_1 = Agent( + agent_name="Parallel-Agent-1", + agent_description="Independent analysis 1", + model_name="gpt-4o-mini", + max_loops=1, +) + +parallel_agent_2 = Agent( + agent_name="Parallel-Agent-2", + agent_description="Independent analysis 2", + model_name="gpt-4o-mini", + max_loops=1, +) + +parallel_agent_3 = Agent( + agent_name="Parallel-Agent-3", + agent_description="Independent analysis 3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence agent +collector = Agent( + agent_name="Collector", + agent_description="Collects all parallel results", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Full-Parallel-Workflow", + backend="rustworkx", +) + +for agent in [parallel_agent_1, parallel_agent_2, parallel_agent_3, collector]: + workflow.add_node(agent) + +# All parallel agents feed into collector +workflow.add_edges_to_target( + [parallel_agent_1, parallel_agent_2, parallel_agent_3], + collector, +) + +results = workflow.run("Analyze three different aspects of renewable energy") +``` + +**Use Case**: Independent analyses, parallel data collection, multi-perspective evaluation. + +### Layer-Based Parallel Execution + +Agents execute in layers, with all agents in a layer running in parallel. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + subgraph Layer1["Layer 1: Data Collection"] + A1[Data-Collector-1] + A2[Data-Collector-2] + A3[Data-Collector-3] + end + subgraph Layer2["Layer 2: Analysis"] + B1[Analyst-1] + B2[Analyst-2] + B3[Analyst-3] + end + subgraph Layer3["Layer 3: Synthesis"] + C[Synthesis] + end + A1 --> B1 + A1 --> B2 + A1 --> B3 + A2 --> B1 + A2 --> B2 + A2 --> B3 + A3 --> B1 + A3 --> B2 + A3 --> B3 + B1 --> C + B2 --> C + B3 --> C +``` + +```python +from swarms import Agent, GraphWorkflow + +# Layer 1: Data Collection (parallel) +data_collector_1 = Agent( + agent_name="Data-Collector-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +data_collector_3 = Agent( + agent_name="Data-Collector-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Layer 2: Analysis (parallel, depends on Layer 1) +analyst_1 = Agent( + agent_name="Analyst-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_3 = Agent( + agent_name="Analyst-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Layer 3: Synthesis (depends on Layer 2) +synthesis = Agent( + agent_name="Synthesis", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Layer-Based-Parallel-Workflow", + backend="rustworkx", +) + +all_agents = [ + data_collector_1, + data_collector_2, + data_collector_3, + analyst_1, + analyst_2, + analyst_3, + synthesis, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Layer 1 -> Layer 2: Full mesh connection +workflow.add_parallel_chain( + [data_collector_1, data_collector_2, data_collector_3], + [analyst_1, analyst_2, analyst_3], +) + +# Layer 2 -> Layer 3: Convergence +workflow.add_edges_to_target( + [analyst_1, analyst_2, analyst_3], + synthesis, +) + +results = workflow.run("Process and analyze data in 
parallel layers") +``` + +**Use Case**: Pipeline processing, multi-stage analysis, batch processing workflows. + +## Majority Voting Patterns + +### Simple Majority Vote + +Multiple agents vote on a decision, with a majority vote aggregator. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Voter-1] --> F[Vote-Aggregator] + B[Voter-2] --> F + C[Voter-3] --> F + D[Voter-4] --> F + E[Voter-5] --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Voting agents +voter_1 = Agent( + agent_name="Voter-1", + agent_description="Provides vote/opinion 1", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_2 = Agent( + agent_name="Voter-2", + agent_description="Provides vote/opinion 2", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_3 = Agent( + agent_name="Voter-3", + agent_description="Provides vote/opinion 3", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_4 = Agent( + agent_name="Voter-4", + agent_description="Provides vote/opinion 4", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_5 = Agent( + agent_name="Voter-5", + agent_description="Provides vote/opinion 5", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Vote aggregator (implements majority voting logic) +vote_aggregator = Agent( + agent_name="Vote-Aggregator", + agent_description="Aggregates votes and determines majority decision", + system_prompt="""You are a vote aggregator. Analyze all the votes/opinions provided + and determine the majority consensus. 
Provide a clear summary of the majority decision.""", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Majority-Voting-Workflow", + backend="rustworkx", +) + +all_agents = [voter_1, voter_2, voter_3, voter_4, voter_5, vote_aggregator] + +for agent in all_agents: + workflow.add_node(agent) + +# All voters -> Aggregator +workflow.add_edges_to_target( + [voter_1, voter_2, voter_3, voter_4, voter_5], + vote_aggregator, +) + +results = workflow.run( + "Should we invest in renewable energy stocks? Provide your vote and reasoning." +) +``` + +**Use Case**: Decision making, consensus building, quality assurance, validation. + +### Weighted Majority Vote + +Similar to simple majority vote but with weighted voters. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Expert-Voter-1
Weight: 2x] --> F[Weighted-Aggregator]
+    B[Expert-Voter-2<br/>Weight: 2x] --> F
+    C[Regular-Voter-1<br/>Weight: 1x] --> F
+    D[Regular-Voter-2<br/>Weight: 1x] --> F
+    E[Regular-Voter-3
Weight: 1x] --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Expert voters (higher weight) +expert_voter_1 = Agent( + agent_name="Expert-Voter-1", + agent_description="Senior expert with high weight", + model_name="gpt-4o-mini", + max_loops=1, +) + +expert_voter_2 = Agent( + agent_name="Expert-Voter-2", + agent_description="Senior expert with high weight", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Regular voters (standard weight) +regular_voter_1 = Agent( + agent_name="Regular-Voter-1", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +regular_voter_2 = Agent( + agent_name="Regular-Voter-2", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +regular_voter_3 = Agent( + agent_name="Regular-Voter-3", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Weighted aggregator +weighted_aggregator = Agent( + agent_name="Weighted-Aggregator", + agent_description="Aggregates votes with expert weighting", + system_prompt="""You are a weighted vote aggregator. Expert voters (Expert-Voter-1, Expert-Voter-2) + have 2x weight compared to regular voters. Analyze all votes and determine the weighted majority decision.""", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Weighted-Majority-Voting-Workflow", + backend="rustworkx", +) + +all_agents = [ + expert_voter_1, + expert_voter_2, + regular_voter_1, + regular_voter_2, + regular_voter_3, + weighted_aggregator, +] + +for agent in all_agents: + workflow.add_node(agent) + +# All voters -> Weighted aggregator +workflow.add_edges_to_target( + [ + expert_voter_1, + expert_voter_2, + regular_voter_1, + regular_voter_2, + regular_voter_3, + ], + weighted_aggregator, +) + +results = workflow.run( + "Evaluate a business proposal. Experts should provide detailed analysis, regular voters provide standard evaluation." 
+) +``` + +## Fan-Out/Fan-In Patterns + +### Simple Fan-Out + +One source agent distributes work to multiple target agents. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Coordinator] --> B[Specialist-1] + A --> C[Specialist-2] + A --> D[Specialist-3] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Source agent +coordinator = Agent( + agent_name="Coordinator", + agent_description="Distributes tasks to specialists", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Target agents (specialists) +specialist_1 = Agent( + agent_name="Specialist-1", + agent_description="Technical specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +specialist_2 = Agent( + agent_name="Specialist-2", + agent_description="Business specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +specialist_3 = Agent( + agent_name="Specialist-3", + agent_description="Financial specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-Out-Workflow", + backend="rustworkx", +) + +for agent in [coordinator, specialist_1, specialist_2, specialist_3]: + workflow.add_node(agent) + +# Fan-out: One source to multiple targets +workflow.add_edges_from_source( + coordinator, + [specialist_1, specialist_2, specialist_3], +) + +results = workflow.run("Analyze a startup from technical, business, and financial perspectives") +``` + +**Use Case**: Task distribution, parallel specialization, workload splitting. + +### Simple Fan-In + +Multiple source agents converge to a single target agent. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Analyst-1] --> D[Synthesis] + B[Analyst-2] --> D + C[Analyst-3] --> D +``` + +```python +from swarms import Agent, GraphWorkflow + +# Source agents +analyst_1 = Agent( + agent_name="Analyst-1", + agent_description="Technical analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + agent_description="Market analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_3 = Agent( + agent_name="Analyst-3", + agent_description="Financial analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Target agent (synthesis) +synthesis = Agent( + agent_name="Synthesis", + agent_description="Synthesizes all analyses", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-In-Workflow", + backend="rustworkx", +) + +for agent in [analyst_1, analyst_2, analyst_3, synthesis]: + workflow.add_node(agent) + +# Fan-in: Multiple sources to one target +workflow.add_edges_to_target( + [analyst_1, analyst_2, analyst_3], + synthesis, +) + +results = workflow.run("Provide comprehensive analysis from multiple perspectives") +``` + +**Use Case**: Result aggregation, synthesis, convergence of parallel work. + +### Fan-Out Followed by Fan-In + +A common pattern: distribute work, then aggregate results. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Coordinator] --> B[Worker-1] + A --> C[Worker-2] + A --> D[Worker-3] + A --> E[Worker-4] + B --> F[Aggregator] + C --> F + D --> F + E --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial coordinator +coordinator = Agent( + agent_name="Coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Parallel workers +worker_1 = Agent( + agent_name="Worker-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_2 = Agent( + agent_name="Worker-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_3 = Agent( + agent_name="Worker-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_4 = Agent( + agent_name="Worker-4", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Final aggregator +aggregator = Agent( + agent_name="Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-Out-Fan-In-Workflow", + backend="rustworkx", +) + +all_agents = [ + coordinator, + worker_1, + worker_2, + worker_3, + worker_4, + aggregator, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Fan-out: Coordinator -> Workers +workflow.add_edges_from_source( + coordinator, + [worker_1, worker_2, worker_3, worker_4], +) + +# Fan-in: Workers -> Aggregator +workflow.add_edges_to_target( + [worker_1, worker_2, worker_3, worker_4], + aggregator, +) + +results = workflow.run("Distribute research tasks and synthesize results") +``` + +**Use Case**: Map-reduce patterns, parallel processing with aggregation, distributed analysis. + +## Sequential Patterns + +### Linear Chain + +Simple sequential execution where each agent depends on the previous one. 
+ +**Architecture Diagram:** + +```mermaid +graph LR + A[Agent-1] --> B[Agent-2] + B --> C[Agent-3] + C --> D[Agent-4] + D --> E[Agent-5] +``` + +```python +from swarms import Agent, GraphWorkflow + +agents = [ + Agent( + agent_name=f"Agent-{i+1}", + model_name="gpt-4o-mini", + max_loops=1, + ) + for i in range(5) +] + +workflow = GraphWorkflow( + name="Linear-Chain-Workflow", + backend="rustworkx", +) + +for agent in agents: + workflow.add_node(agent) + +# Create linear chain +for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + +results = workflow.run("Process data through a linear pipeline") +``` + +### Sequential with Branching + +Sequential flow with conditional branching. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Initial] --> B[Branch-1-Agent] + A --> C[Branch-2-Agent] + B --> D[Branch-1-Continuation] + C --> E[Branch-2-Continuation] + D --> F[Final] + E --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial agent +initial = Agent( + agent_name="Initial", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 1 +branch_1_agent = Agent( + agent_name="Branch-1-Agent", + model_name="gpt-4o-mini", + max_loops=1, +) + +branch_1_continuation = Agent( + agent_name="Branch-1-Continuation", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 2 +branch_2_agent = Agent( + agent_name="Branch-2-Agent", + model_name="gpt-4o-mini", + max_loops=1, +) + +branch_2_continuation = Agent( + agent_name="Branch-2-Continuation", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence +final = Agent( + agent_name="Final", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Sequential-Branching-Workflow", + backend="rustworkx", +) + +all_agents = [ + initial, + branch_1_agent, + branch_1_continuation, + branch_2_agent, + branch_2_continuation, + final, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Initial -> Branches +workflow.add_edge(initial, 
branch_1_agent) +workflow.add_edge(initial, branch_2_agent) + +# Branch continuations +workflow.add_edge(branch_1_agent, branch_1_continuation) +workflow.add_edge(branch_2_agent, branch_2_continuation) + +# Convergence +workflow.add_edge(branch_1_continuation, final) +workflow.add_edge(branch_2_continuation, final) + +results = workflow.run("Process through branching paths") +``` + +## Advanced Patterns + +### Pipeline with Validation + +Sequential pipeline with validation checkpoints. + +**Architecture Diagram:** + +```mermaid +graph LR + A[Data-Collector] --> B[Validator-1] + B --> C[Processor] + C --> D[Validator-2] + D --> E[Finalizer] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Pipeline stages +data_collector = Agent( + agent_name="Data-Collector", + model_name="gpt-4o-mini", + max_loops=1, +) + +validator_1 = Agent( + agent_name="Validator-1", + agent_description="Validates data quality", + model_name="gpt-4o-mini", + max_loops=1, +) + +processor = Agent( + agent_name="Processor", + model_name="gpt-4o-mini", + max_loops=1, +) + +validator_2 = Agent( + agent_name="Validator-2", + agent_description="Validates processing results", + model_name="gpt-4o-mini", + max_loops=1, +) + +finalizer = Agent( + agent_name="Finalizer", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Pipeline-With-Validation", + backend="rustworkx", +) + +for agent in [data_collector, validator_1, processor, validator_2, finalizer]: + workflow.add_node(agent) + +# Sequential pipeline with validation checkpoints +workflow.add_edge(data_collector, validator_1) +workflow.add_edge(validator_1, processor) +workflow.add_edge(processor, validator_2) +workflow.add_edge(validator_2, finalizer) + +results = workflow.run("Process data with quality checkpoints") +``` + +### Multi-Stage Review Process + +Multiple review stages before final approval. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Submitter] --> B[Reviewer-1A] + A --> C[Reviewer-1B] + B --> D[Stage-1-Aggregator] + C --> D + D --> E[Reviewer-2A] + D --> F[Reviewer-2B] + E --> G[Stage-2-Aggregator] + F --> G + G --> H[Approver] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial submission +submitter = Agent( + agent_name="Submitter", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Review stage 1 (parallel reviewers) +reviewer_1a = Agent( + agent_name="Reviewer-1A", + model_name="gpt-4o-mini", + max_loops=1, +) + +reviewer_1b = Agent( + agent_name="Reviewer-1B", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Stage 1 aggregator +stage_1_aggregator = Agent( + agent_name="Stage-1-Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Review stage 2 +reviewer_2a = Agent( + agent_name="Reviewer-2A", + model_name="gpt-4o-mini", + max_loops=1, +) + +reviewer_2b = Agent( + agent_name="Reviewer-2B", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Stage 2 aggregator +stage_2_aggregator = Agent( + agent_name="Stage-2-Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Final approver +approver = Agent( + agent_name="Approver", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Multi-Stage-Review", + backend="rustworkx", +) + +all_agents = [ + submitter, + reviewer_1a, + reviewer_1b, + stage_1_aggregator, + reviewer_2a, + reviewer_2b, + stage_2_aggregator, + approver, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Stage 1: Parallel review +workflow.add_edge(submitter, reviewer_1a) +workflow.add_edge(submitter, reviewer_1b) +workflow.add_edges_to_target([reviewer_1a, reviewer_1b], stage_1_aggregator) + +# Stage 2: Parallel review +workflow.add_edge(stage_1_aggregator, reviewer_2a) +workflow.add_edge(stage_1_aggregator, reviewer_2b) +workflow.add_edges_to_target([reviewer_2a, reviewer_2b], stage_2_aggregator) + +# Final approval 
+workflow.add_edge(stage_2_aggregator, approver) + +results = workflow.run("Review and approve a proposal through multiple stages") +``` + +### Circular/Iterative Pattern + +Agents form a cycle for iterative refinement. + +**Architecture Diagram:** + +```mermaid +graph LR + A[Agent-1] --> B[Agent-2] + B --> C[Agent-3] + C --> D[Exit-Checker] + D -.->|Iterate| A +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create iterative refinement agents +agent_1 = Agent( + agent_name="Agent-1", + agent_description="First refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +agent_2 = Agent( + agent_name="Agent-2", + agent_description="Second refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +agent_3 = Agent( + agent_name="Agent-3", + agent_description="Third refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Exit condition checker +exit_checker = Agent( + agent_name="Exit-Checker", + agent_description="Checks if refinement is complete", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Iterative-Refinement", + backend="rustworkx", + max_loops=3, # Limit iterations +) + +for agent in [agent_1, agent_2, agent_3, exit_checker]: + workflow.add_node(agent) + +# Circular refinement +workflow.add_edge(agent_1, agent_2) +workflow.add_edge(agent_2, agent_3) +workflow.add_edge(agent_3, exit_checker) +# Note: For true iteration, you'd need to add edge back to agent_1 +# This is a simplified example + +results = workflow.run("Iteratively refine a document") +``` + +### Star Pattern + +Central hub agent coordinates with multiple spoke agents. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Hub] --> B[Spoke-1] + A --> C[Spoke-2] + A --> D[Spoke-3] + A --> E[Spoke-4] + B --> A + C --> A + D --> A + E --> A +``` + +```python +from swarms import Agent, GraphWorkflow + +# Central hub +hub = Agent( + agent_name="Hub", + agent_description="Central coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Spoke agents +spoke_1 = Agent( + agent_name="Spoke-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_2 = Agent( + agent_name="Spoke-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_3 = Agent( + agent_name="Spoke-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_4 = Agent( + agent_name="Spoke-4", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Star-Pattern-Workflow", + backend="rustworkx", +) + +for agent in [hub, spoke_1, spoke_2, spoke_3, spoke_4]: + workflow.add_node(agent) + +# Hub -> Spokes (fan-out) +workflow.add_edges_from_source( + hub, + [spoke_1, spoke_2, spoke_3, spoke_4], +) + +# Spokes -> Hub (fan-in) +workflow.add_edges_to_target( + [spoke_1, spoke_2, spoke_3, spoke_4], + hub, +) + +results = workflow.run("Coordinate work through a central hub") +``` + +## Performance Optimization + +### Compilation Best Practices + +Always compile workflows before execution for optimal performance: + +```python +workflow = GraphWorkflow( + name="Optimized-Workflow", + backend="rustworkx", + auto_compile=True, # Automatic compilation +) + +# Or manually compile +workflow.compile() + +# Check compilation status +status = workflow.get_compilation_status() +print(f"Compiled: {status['is_compiled']}") +print(f"Layers: {status['cached_layers_count']}") +``` + +### Large-Scale Workflow Tips + +For workflows with 100+ agents: + +1. **Use rustworkx backend** for better performance +2. **Compile before execution** to cache topological layers +3. **Use parallel patterns** to maximize throughput +4. 
**Monitor compilation status** to ensure optimization + +```python +# Large-scale workflow example +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + backend="rustworkx", # Essential for large graphs + auto_compile=True, + verbose=True, # Monitor performance +) + +# Add many agents... +# Use parallel patterns for efficiency + +# Check performance +status = workflow.get_compilation_status() +print(f"Max workers: {status['max_workers']}") +print(f"Layers: {status['cached_layers_count']}") +``` + +### Visualization for Debugging + +Visualize workflows to understand structure and optimize: + +```python +# Generate visualization +output_file = workflow.visualize( + format="png", + show_summary=True, # Shows parallel patterns + view=True, +) + +# Or simple text visualization +workflow.visualize_simple() +``` + +## Conclusion + +GraphWorkflow with rustworkx backend provides a powerful framework for implementing complex multi-agent patterns. Key takeaways: + +1. **Choose the right pattern** for your use case +2. **Use rustworkx** for large-scale workflows (100+ nodes) +3. **Leverage parallel patterns** for performance +4. **Compile workflows** before execution +5. **Visualize** to understand and debug workflows + +For more examples, see the [rustworkx examples directory](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/graphworkflow_examples/rustworkx_examples). 
From 93b17bd64633918793d3d58562b3dfff0537f1a1 Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Fri, 21 Nov 2025 18:46:22 -0800 Subject: [PATCH 30/42] [BUG-FIX] Test Update for build_agent -> build_llm_agent --- tests/structs/test_auto_swarms_builder.py | 35 ++++++++++++++++------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py index a1e9085a..768256e1 100644 --- a/tests/structs/test_auto_swarms_builder.py +++ b/tests/structs/test_auto_swarms_builder.py @@ -41,21 +41,27 @@ def test_initialization(): def test_agent_building(): - """Test building individual agents""" + """Test building individual agents from specs""" print_separator() print("Testing Agent Building") try: swarm = AutoSwarmBuilder() - agent = swarm.build_agent( - agent_name="TestAgent", - agent_description="A test agent", - agent_system_prompt="You are a test agent", - max_loops=1, - ) + specs = { + "agents": [ + { + "agent_name": "TestAgent", + "description": "A test agent", + "system_prompt": "You are a test agent", + "max_loops": 1, + } + ] + } + agents = swarm.create_agents_from_specs(specs) + agent = agents[0] print("βœ“ Built agent with configuration:") print(f" - Name: {agent.agent_name}") - print(f" - Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f" - Max loops: {agent.max_loops}") print("βœ“ Agent building test passed") return agent @@ -69,18 +75,25 @@ def test_agent_creation(): print_separator() print("Testing Agent Creation from Task") try: + import json + swarm = AutoSwarmBuilder( name="ResearchSwarm", description="A swarm for research tasks", ) task = "Research the latest developments in quantum computing" - agents = swarm._create_agents(task) + # create_agents returns a JSON string + agent_specs_json = swarm.create_agents(task) + # Parse JSON string to dict + agent_specs = json.loads(agent_specs_json) + # Convert specs to actual Agent 
objects + agents = swarm.create_agents_from_specs(agent_specs) print("βœ“ Created agents for research task:") for i, agent in enumerate(agents, 1): print(f" Agent {i}:") print(f" - Name: {agent.agent_name}") - print(f" - Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f"βœ“ Created {len(agents)} agents successfully") return agents except Exception as e: @@ -155,7 +168,7 @@ def test_error_handling(): # Test with invalid agent configuration print("Testing invalid agent configuration...") try: - swarm.build_agent("", "", "") + swarm.create_agents_from_specs({"agents": [{"agent_name": ""}]}) print( "βœ— Should have raised an error for empty agent configuration" ) From c2ae1ec33681a5f570cec9c9f9fbee1d4d48e657 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Sat, 22 Nov 2025 01:34:29 -0800 Subject: [PATCH 31/42] added ire test --- tests/structs/test_i_agent.py | 86 +++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 tests/structs/test_i_agent.py diff --git a/tests/structs/test_i_agent.py b/tests/structs/test_i_agent.py new file mode 100644 index 00000000..3edf9a8e --- /dev/null +++ b/tests/structs/test_i_agent.py @@ -0,0 +1,86 @@ +import pytest + +from swarms.agents.i_agent import IterativeReflectiveExpansion + + +def test_ire_agent_initialization(): + """Test IRE agent initialization with default parameters""" + agent = IterativeReflectiveExpansion() + + assert agent is not None + assert agent.agent_name == "General-Reasoning-Agent" + assert agent.max_iterations == 5 + assert agent.output_type == "dict" + assert agent.agent is not None + + +def test_ire_agent_custom_initialization(): + """Test IRE agent initialization with custom parameters""" + agent = IterativeReflectiveExpansion( + agent_name="Custom-IRE-Agent", + description="A custom reasoning agent", + max_iterations=3, + model_name="gpt-4o", + output_type="string", + ) + + assert agent.agent_name == "Custom-IRE-Agent" + assert 
agent.description == "A custom reasoning agent" + assert agent.max_iterations == 3 + assert agent.output_type == "string" + + +def test_ire_agent_execution(): + """Test IRE agent execution with a simple problem""" + agent = IterativeReflectiveExpansion( + agent_name="Test-IRE-Agent", + model_name="gpt-4o", + max_iterations=2, + output_type="dict", + ) + + # Test with a simple reasoning task + task = "What are three main benefits of renewable energy?" + result = agent.run(task) + + # Result should not be None + assert result is not None + # Result should be dict or string based on output_type + assert isinstance(result, (str, dict)) + + +def test_ire_agent_generate_hypotheses(): + """Test IRE agent hypothesis generation""" + agent = IterativeReflectiveExpansion( + agent_name="Hypothesis-Test-Agent", + max_iterations=1, + ) + + task = "How can we reduce carbon emissions?" + hypotheses = agent.generate_initial_hypotheses(task) + + assert hypotheses is not None + assert isinstance(hypotheses, list) + assert len(hypotheses) > 0 + + +def test_ire_agent_workflow(): + """Test complete IRE agent workflow with iterative refinement""" + agent = IterativeReflectiveExpansion( + agent_name="Workflow-Test-Agent", + description="Agent for testing complete workflow", + model_name="gpt-4o", + max_iterations=2, + output_type="dict", + ) + + # Test with a problem that requires iterative refinement + task = "Design an efficient public transportation system for a small city" + result = agent.run(task) + + # Verify the result is valid + assert result is not None + assert isinstance(result, (str, dict)) + + # Check that conversation was populated during execution + assert agent.conversation is not None From 688772e99b9ab3e34542df097c09ffb8e58e1a63 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sat, 22 Nov 2025 17:53:49 -0800 Subject: [PATCH 32/42] [FEAT][LLMCouncil][Docs][Examples] --- docs/examples/llm_council_examples.md | 106 ++++ docs/mkdocs.yml | 2 + 
docs/swarms/structs/llm_council.md | 453 +++++++++++++++++ .../llm_council_examples/README.md | 95 ++++ .../business_strategy_council.py | 32 ++ .../etf_stock_analysis_council.py | 30 ++ .../finance_analysis_council.py | 30 ++ .../legal_analysis_council.py | 32 ++ .../marketing_strategy_council.py | 29 ++ .../medical_diagnosis_council.py | 37 ++ .../medical_treatment_council.py | 31 ++ .../research_analysis_council.py | 32 ++ .../technology_assessment_council.py | 32 ++ hiearchical_swarm_example.py | 3 +- llm_council_example.py | 23 + pyproject.toml | 2 +- swarms/structs/__init__.py | 2 + swarms/structs/aop.py | 1 + swarms/structs/llm_council.py | 459 ++++++++++++++++++ 19 files changed, 1428 insertions(+), 3 deletions(-) create mode 100644 docs/examples/llm_council_examples.md create mode 100644 docs/swarms/structs/llm_council.md create mode 100644 examples/multi_agent/llm_council_examples/README.md create mode 100644 examples/multi_agent/llm_council_examples/business_strategy_council.py create mode 100644 examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/finance_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/legal_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/marketing_strategy_council.py create mode 100644 examples/multi_agent/llm_council_examples/medical_diagnosis_council.py create mode 100644 examples/multi_agent/llm_council_examples/medical_treatment_council.py create mode 100644 examples/multi_agent/llm_council_examples/research_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/technology_assessment_council.py create mode 100644 llm_council_example.py create mode 100644 swarms/structs/llm_council.py diff --git a/docs/examples/llm_council_examples.md b/docs/examples/llm_council_examples.md new file mode 100644 index 00000000..ab607dbc --- /dev/null +++ 
b/docs/examples/llm_council_examples.md @@ -0,0 +1,106 @@ +# LLM Council Examples + +This page provides examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that: + +1. Each respond independently to queries +2. Review and rank each other's anonymized responses +3. Have a Chairman synthesize all responses into a final comprehensive answer + +## Example Files + +All LLM Council examples are located in the [`examples/multi_agent/llm_council_examples/`](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/llm_council_examples) directory. + +### Marketing & Business + +- **[marketing_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/marketing_strategy_council.py)** - Marketing strategy analysis and recommendations +- **[business_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/business_strategy_council.py)** - Comprehensive business strategy development + +### Finance & Investment + +- **[finance_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/finance_analysis_council.py)** - Financial analysis and investment recommendations +- **[etf_stock_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py)** - ETF and stock analysis with portfolio recommendations + +### Medical & Healthcare + +- **[medical_treatment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_treatment_council.py)** - Medical treatment recommendations and care plans +- **[medical_diagnosis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py)** - Diagnostic analysis based on symptoms + +### 
Technology & Research + +- **[technology_assessment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/technology_assessment_council.py)** - Technology evaluation and implementation strategy +- **[research_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/research_analysis_council.py)** - Comprehensive research analysis on complex topics + +### Legal + +- **[legal_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/legal_analysis_council.py)** - Legal implications and compliance analysis + +## Basic Usage Pattern + +All examples follow the same pattern: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Run a query +result = council.run("Your query here") + +# Access results +print(result["final_response"]) # Chairman's synthesized answer +print(result["original_responses"]) # Individual member responses +print(result["evaluations"]) # How members ranked each other +``` + +## Running Examples + +Run any example directly: + +```bash +python examples/multi_agent/llm_council_examples/marketing_strategy_council.py +python examples/multi_agent/llm_council_examples/finance_analysis_council.py +python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +``` + +## Key Features + +- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights +- **Peer Review**: Members evaluate and rank each other's responses anonymously +- **Synthesis**: Chairman combines the best elements from all responses +- **Transparency**: See both individual responses and evaluation rankings + +## Council Members + +The default council consists of: +- **GPT-5.1-Councilor**: Analytical and comprehensive +- **Gemini-3-Pro-Councilor**: Concise and well-processed +- **Claude-Sonnet-4.5-Councilor**: 
Thoughtful and balanced +- **Grok-4-Councilor**: Creative and innovative + +## Customization + +You can create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +custom_agent = Agent( + agent_name="Custom-Councilor", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-4.1", + max_loops=1, +) + +council = LLMCouncil( + council_members=[custom_agent, ...], + chairman_model="gpt-5.1", + verbose=True +) +``` + +## Documentation + +For complete API reference and detailed documentation, see the [LLM Council Reference Documentation](../swarms/structs/llm_council.md). + diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5b70d5f6..53936b07 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -281,6 +281,7 @@ nav: - MALT: "swarms/structs/malt.md" - Multi-Agent Execution Utilities: "swarms/structs/various_execution_methods.md" - Council of Judges: "swarms/structs/council_of_judges.md" + - LLM Council: "swarms/structs/llm_council.md" - Heavy Swarm: "swarms/structs/heavy_swarm.md" - Social Algorithms: "swarms/structs/social_algorithms.md" @@ -401,6 +402,7 @@ nav: - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md" - Multi-Agentic Patterns with GraphWorkflow: "swarms/examples/graphworkflow_rustworkx_patterns.md" - Mixture of Agents Example: "swarms/examples/moa_example.md" + - LLM Council Examples: "examples/llm_council_examples.md" - Unique Swarms: "swarms/examples/unique_swarms.md" - Agents as Tools: "swarms/examples/agents_as_tools.md" - Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md" diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md new file mode 100644 index 00000000..6352bcef --- /dev/null +++ b/docs/swarms/structs/llm_council.md @@ -0,0 +1,453 @@ +# LLM Council Class Documentation + +```mermaid +flowchart TD + A[User Query] --> B[LLM Council Initialization] + B --> C{Council Members Provided?} + C 
-->|No| D[Create Default Council] + C -->|Yes| E[Use Provided Members] + D --> F[Step 1: Parallel Response Generation] + E --> F + + subgraph "Default Council Members" + G1[GPT-5.1-Councilor
Analytical & Comprehensive] + G2[Gemini-3-Pro-Councilor
Concise & Structured] + G3[Claude-Sonnet-4.5-Councilor
Thoughtful & Balanced] + G4[Grok-4-Councilor
Creative & Innovative] + end + + F --> G1 + F --> G2 + F --> G3 + F --> G4 + + G1 --> H[Collect All Responses] + G2 --> H + G3 --> H + G4 --> H + + H --> I[Step 2: Anonymize Responses] + I --> J[Assign Anonymous IDs: A, B, C, D...] + + J --> K[Step 3: Parallel Evaluation] + + subgraph "Evaluation Phase" + K --> L1[Member 1 Evaluates All] + K --> L2[Member 2 Evaluates All] + K --> L3[Member 3 Evaluates All] + K --> L4[Member 4 Evaluates All] + end + + L1 --> M[Collect Evaluations & Rankings] + L2 --> M + L3 --> M + L4 --> M + + M --> N[Step 4: Chairman Synthesis] + N --> O[Chairman Agent] + O --> P[Final Synthesized Response] + + P --> Q[Return Results Dictionary] + + style A fill:#e1f5ff + style P fill:#c8e6c9 + style Q fill:#c8e6c9 + style O fill:#fff9c4 +``` + +The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own. + +## Workflow Overview + +The LLM Council follows a four-step process: + +1. **Parallel Response Generation**: All council members independently respond to the user query +2. **Anonymization**: Responses are anonymized with random IDs (A, B, C, D, etc.) to ensure objective evaluation +3. **Peer Review**: Each member evaluates and ranks all responses (including potentially their own) +4. 
**Synthesis**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer + +## Class Definition + +### LLMCouncil + +```python +class LLMCouncil: +``` + +### Attributes + +| Attribute | Type | Description | Default | +|-----------|------|-------------|---------| +| `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) | +| `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization | +| `verbose` | `bool` | Whether to print progress and intermediate results | `True` | + +## Methods + +### `__init__` + +Initializes the LLM Council with council members and a Chairman agent. + +#### Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. | +| `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. | +| `verbose` | `bool` | `True` | Whether to print progress and intermediate results. | + +#### Returns + +| Type | Description | +|------|-------------| +| `LLMCouncil` | Initialized LLM Council instance. | + +#### Description + +Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of: +- **GPT-5.1-Councilor**: Analytical and comprehensive responses +- **Gemini-3-Pro-Councilor**: Concise and well-processed responses +- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses +- **Grok-4-Councilor**: Creative and innovative responses + +The Chairman agent is automatically created with a specialized prompt for synthesizing responses. 
+ +#### Example Usage + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create council with default members +council = LLMCouncil(verbose=True) + +# Create council with custom members +from swarms import Agent +custom_members = [ + Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1), + Agent(agent_name="Expert-2", model_name="claude-3-opus", max_loops=1), +] +council = LLMCouncil( + council_members=custom_members, + chairman_model="gpt-4", + verbose=True +) +``` + +--- + +### `run` + +Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. + +#### Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `query` | `str` | Required | The user's query to process through the council. | + +#### Returns + +| Type | Description | +|------|-------------| +| `Dict` | Dictionary containing the following keys: | + +#### Return Dictionary Structure + +| Key | Type | Description | +|-----|------|-------------| +| `query` | `str` | The original user query. | +| `original_responses` | `Dict[str, str]` | Dictionary mapping council member names to their original responses. | +| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts (rankings and reasoning). | +| `final_response` | `str` | The Chairman's synthesized final answer combining all perspectives. | +| `anonymous_mapping` | `Dict[str, str]` | Mapping from anonymous IDs (A, B, C, D) to member names for reference. | + +#### Description + +Executes the complete LLM Council workflow: + +1. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` +2. **Collection Phase**: Collects all responses and maps them to member names +3. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity +4. 
**Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution` +5. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer + +The method provides verbose output by default, showing progress at each stage. + +#### Example Usage + +```python +from swarms.structs.llm_council import LLMCouncil + +council = LLMCouncil(verbose=True) + +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" + +result = council.run(query) + +# Access the final synthesized response +print(result["final_response"]) + +# Access individual member responses +for name, response in result["original_responses"].items(): + print(f"{name}: {response[:200]}...") + +# Access evaluation rankings +for evaluator, evaluation in result["evaluations"].items(): + print(f"{evaluator} evaluation:\n{evaluation[:300]}...") + +# Check anonymous mapping +print("Anonymous IDs:", result["anonymous_mapping"]) +``` + +--- + +### `_create_default_council` + +Creates default council members with specialized prompts and models. + +#### Parameters + +None (internal method). + +#### Returns + +| Type | Description | +|------|-------------| +| `List[Agent]` | List of Agent instances configured as council members. 
| + +#### Description + +Internal method that creates the default council configuration with four specialized agents: + +- **GPT-5.1-Councilor** (`model_name="gpt-5.1"`): Analytical and comprehensive, temperature=0.7 +- **Gemini-3-Pro-Councilor** (`model_name="gemini-2.5-flash"`): Concise and structured, temperature=0.7 +- **Claude-Sonnet-4.5-Councilor** (`model_name="anthropic/claude-sonnet-4-5"`): Thoughtful and balanced, temperature=0.0 +- **Grok-4-Councilor** (`model_name="x-ai/grok-4"`): Creative and innovative, temperature=0.8 + +Each agent is configured with: +- Specialized system prompts matching their role +- `max_loops=1` for single-response generation +- `verbose=False` to reduce noise during parallel execution +- Appropriate temperature settings for their style + +--- + +## Helper Functions + +### `get_gpt_councilor_prompt()` + +Returns the system prompt for GPT-5.1 councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing analytical thinking and comprehensive coverage. | + +--- + +### `get_gemini_councilor_prompt()` + +Returns the system prompt for Gemini 3 Pro councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing concise, well-processed, and structured responses. | + +--- + +### `get_claude_councilor_prompt()` + +Returns the system prompt for Claude Sonnet 4.5 councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing thoughtful, balanced, and nuanced responses. | + +--- + +### `get_grok_councilor_prompt()` + +Returns the system prompt for Grok-4 councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing creative, innovative, and unique perspectives. | + +--- + +### `get_chairman_prompt()` + +Returns the system prompt for the Chairman agent. 
+ +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string for synthesizing responses and evaluations into a final answer. | + +--- + +### `get_evaluation_prompt(query, responses, evaluator_name)` + +Creates evaluation prompt for council members to review and rank responses. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `query` | `str` | The original user query. | +| `responses` | `Dict[str, str]` | Dictionary mapping anonymous IDs to response texts. | +| `evaluator_name` | `str` | Name of the agent doing the evaluation. | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Formatted evaluation prompt string with instructions for ranking responses. | + +--- + +### `get_synthesis_prompt(query, original_responses, evaluations, id_to_member)` + +Creates synthesis prompt for the Chairman. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `query` | `str` | Original user query. | +| `original_responses` | `Dict[str, str]` | Dictionary mapping member names to their responses. | +| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts. | +| `id_to_member` | `Dict[str, str]` | Mapping from anonymous IDs to member names. | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Formatted synthesis prompt for the Chairman agent. 
| + +--- + +## Use Cases + +The LLM Council is ideal for scenarios requiring: + +- **Multi-perspective Analysis**: When you need diverse viewpoints on complex topics +- **Quality Assurance**: When peer review and ranking can improve response quality +- **Transparent Decision Making**: When you want to see how different models evaluate each other +- **Synthesis of Expertise**: When combining multiple specialized perspectives is valuable + +### Common Applications + +- **Medical Diagnosis**: Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations +- **Financial Analysis**: Different financial experts analyze investments and rank each other's assessments +- **Legal Analysis**: Multiple legal perspectives evaluate compliance and risk +- **Business Strategy**: Diverse strategic viewpoints are synthesized into comprehensive plans +- **Research Analysis**: Multiple research perspectives are combined for thorough analysis + +## Examples + +For comprehensive examples demonstrating various use cases, see the [LLM Council Examples](../../../examples/multi_agent/llm_council_examples/) directory: + +- **Medical**: `medical_diagnosis_council.py`, `medical_treatment_council.py` +- **Finance**: `finance_analysis_council.py`, `etf_stock_analysis_council.py` +- **Business**: `business_strategy_council.py`, `marketing_strategy_council.py` +- **Technology**: `technology_assessment_council.py`, `research_analysis_council.py` +- **Legal**: `legal_analysis_council.py` + +### Quick Start Example + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Example query +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" 
+ +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + +# Optionally print evaluations +print("\n\n" + "="*80) +print("EVALUATIONS") +print("="*80) +for name, evaluation in result["evaluations"].items(): + print(f"\n{name}:") + print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation) +``` + +## Customization + +### Creating Custom Council Members + +You can create custom council members with specialized roles: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +# Create custom councilor +custom_agent = Agent( + agent_name="Domain-Expert-Councilor", + agent_description="Specialized domain expert for specific analysis", + system_prompt=get_gpt_councilor_prompt(), # Or create custom prompt + model_name="gpt-4", + max_loops=1, + verbose=False, + temperature=0.7, +) + +# Create council with custom members +council = LLMCouncil( + council_members=[custom_agent, ...], # Add your custom agents + chairman_model="gpt-4", + verbose=True +) +``` + +### Custom Chairman Model + +You can specify a different model for the Chairman: + +```python +council = LLMCouncil( + chairman_model="claude-3-opus", # Use Claude as Chairman + verbose=True +) +``` + +## Architecture Benefits + +1. **Diversity**: Multiple models provide varied perspectives and approaches +2. **Quality Control**: Peer review ensures responses are evaluated objectively +3. **Synthesis**: Chairman combines the best elements from all responses +4. **Transparency**: Full visibility into individual responses and evaluation rankings +5. **Scalability**: Easy to add or remove council members +6. 
**Flexibility**: Supports custom agents and models + +## Performance Considerations + +- **Parallel Execution**: Both response generation and evaluation phases run in parallel for efficiency +- **Anonymization**: Responses are anonymized to prevent bias in evaluation +- **Model Selection**: Different models can be used for different roles based on their strengths +- **Verbose Mode**: Can be disabled for production use to reduce output + +## Related Documentation + +- [Multi-Agent Architectures Overview](overview.md) +- [Council of Judges](council_of_judges.md) - Similar peer review pattern +- [Agent Class Reference](agent.md) - Understanding individual agents +- [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods + diff --git a/examples/multi_agent/llm_council_examples/README.md b/examples/multi_agent/llm_council_examples/README.md new file mode 100644 index 00000000..3dd62f16 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/README.md @@ -0,0 +1,95 @@ +# LLM Council Examples + +This directory contains examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that: + +1. Each respond independently to queries +2. Review and rank each other's anonymized responses +3. 
Have a Chairman synthesize all responses into a final comprehensive answer + +## Examples + +### Marketing & Business +- **marketing_strategy_council.py** - Marketing strategy analysis and recommendations +- **business_strategy_council.py** - Comprehensive business strategy development + +### Finance & Investment +- **finance_analysis_council.py** - Financial analysis and investment recommendations +- **etf_stock_analysis_council.py** - ETF and stock analysis with portfolio recommendations + +### Medical & Healthcare +- **medical_treatment_council.py** - Medical treatment recommendations and care plans +- **medical_diagnosis_council.py** - Diagnostic analysis based on symptoms + +### Technology & Research +- **technology_assessment_council.py** - Technology evaluation and implementation strategy +- **research_analysis_council.py** - Comprehensive research analysis on complex topics + +### Legal +- **legal_analysis_council.py** - Legal implications and compliance analysis + +## Usage + +Each example follows the same pattern: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Run a query +result = council.run("Your query here") + +# Access results +print(result["final_response"]) # Chairman's synthesized answer +print(result["original_responses"]) # Individual member responses +print(result["evaluations"]) # How members ranked each other +``` + +## Running Examples + +Run any example directly: + +```bash +python examples/multi_agent/llm_council_examples/marketing_strategy_council.py +python examples/multi_agent/llm_council_examples/finance_analysis_council.py +python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +``` + +## Key Features + +- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights +- **Peer Review**: Members evaluate and rank each other's responses anonymously +- **Synthesis**: Chairman combines the best 
elements from all responses +- **Transparency**: See both individual responses and evaluation rankings + +## Council Members + +The default council consists of: +- **GPT-5.1-Councilor**: Analytical and comprehensive +- **Gemini-3-Pro-Councilor**: Concise and well-processed +- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced +- **Grok-4-Councilor**: Creative and innovative + +## Customization + +You can create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +custom_agent = Agent( + agent_name="Custom-Councilor", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-4.1", + max_loops=1, +) + +council = LLMCouncil( + council_members=[custom_agent, ...], + chairman_model="gpt-5.1", + verbose=True +) +``` + diff --git a/examples/multi_agent/llm_council_examples/business_strategy_council.py b/examples/multi_agent/llm_council_examples/business_strategy_council.py new file mode 100644 index 00000000..bacc8995 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/business_strategy_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Business Strategy Development + +This example demonstrates using the LLM Council to develop comprehensive +business strategies for new ventures. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Business strategy query +query = """ +A tech startup wants to launch an AI-powered personal finance app targeting +millennials and Gen Z. Develop a comprehensive business strategy including: +1. Market opportunity and competitive landscape analysis +2. Product positioning and unique value proposition +3. Go-to-market strategy and customer acquisition plan +4. Revenue model and pricing strategy +5. Key partnerships and distribution channels +6. Resource requirements and funding needs +7. Risk assessment and mitigation strategies +8. 
Success metrics and KPIs for first 12 months +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py new file mode 100644 index 00000000..b69ffb70 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py @@ -0,0 +1,30 @@ +""" +LLM Council Example: ETF Stock Analysis + +This example demonstrates using the LLM Council to analyze ETF holdings +and provide stock investment recommendations. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# ETF and stock analysis query +query = """ +Analyze the top energy ETFs (including nuclear, solar, gas, and renewable energy) +and provide: +1. Top 5 best-performing energy stocks across all energy sectors +2. ETF recommendations for diversified energy exposure +3. Risk-return profiles for each recommendation +4. Current market conditions affecting energy investments +5. Allocation strategy for a $100,000 portfolio +6. Key metrics to track for each investment +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/finance_analysis_council.py b/examples/multi_agent/llm_council_examples/finance_analysis_council.py new file mode 100644 index 00000000..d1f4c9a5 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/finance_analysis_council.py @@ -0,0 +1,30 @@ +""" +LLM Council Example: Financial Analysis + +This example demonstrates using the LLM Council to provide comprehensive +financial analysis and investment recommendations. 
+""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Financial analysis query +query = """ +Provide a comprehensive financial analysis for investing in emerging markets +technology ETFs. Include: +1. Risk assessment and volatility analysis +2. Historical performance trends +3. Sector composition and diversification benefits +4. Comparison with developed market tech ETFs +5. Recommended allocation percentage for a moderate risk portfolio +6. Key factors to monitor going forward +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/legal_analysis_council.py b/examples/multi_agent/llm_council_examples/legal_analysis_council.py new file mode 100644 index 00000000..01bdcdc8 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/legal_analysis_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Legal Analysis + +This example demonstrates using the LLM Council to analyze legal scenarios +and provide comprehensive legal insights. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Legal analysis query +query = """ +A startup is considering using AI-generated content for their marketing materials. +Analyze the legal implications including: +1. Intellectual property rights and ownership of AI-generated content +2. Copyright and trademark considerations +3. Liability for AI-generated content that may be inaccurate or misleading +4. Compliance with advertising regulations (FTC, FDA, etc.) +5. Data privacy implications if using customer data to train models +6. Contractual considerations with AI service providers +7. Risk mitigation strategies +8. 
Best practices for legal compliance +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py new file mode 100644 index 00000000..b033d982 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py @@ -0,0 +1,29 @@ +""" +LLM Council Example: Marketing Strategy Analysis + +This example demonstrates using the LLM Council to analyze and develop +comprehensive marketing strategies by leveraging multiple AI perspectives. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Marketing strategy query +query = """ +Analyze the marketing strategy for a new sustainable energy startup launching +a solar panel subscription service. Provide recommendations on: +1. Target audience segmentation +2. Key messaging and value propositions +3. Marketing channels and budget allocation +4. Competitive positioning +5. Launch timeline and milestones +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py new file mode 100644 index 00000000..f143945c --- /dev/null +++ b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py @@ -0,0 +1,37 @@ +""" +LLM Council Example: Medical Diagnosis Analysis + +This example demonstrates using the LLM Council to analyze symptoms +and provide diagnostic insights. 
+""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Medical diagnosis query +query = """ +A 35-year-old patient presents with: +- Persistent fatigue for 3 months +- Unexplained weight loss (15 lbs) +- Night sweats +- Intermittent low-grade fever +- Swollen lymph nodes in neck and armpits +- Recent blood work shows elevated ESR and CRP + +Provide: +1. Differential diagnosis with most likely conditions ranked +2. Additional diagnostic tests needed to confirm +3. Red flag symptoms requiring immediate attention +4. Possible causes and risk factors +5. Recommended next steps for the patient +6. When to seek emergency care +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/medical_treatment_council.py b/examples/multi_agent/llm_council_examples/medical_treatment_council.py new file mode 100644 index 00000000..cd828f1d --- /dev/null +++ b/examples/multi_agent/llm_council_examples/medical_treatment_council.py @@ -0,0 +1,31 @@ +""" +LLM Council Example: Medical Treatment Analysis + +This example demonstrates using the LLM Council to analyze medical treatments +and provide comprehensive treatment recommendations. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Medical treatment query +query = """ +A 45-year-old patient with Type 2 diabetes, hypertension, and early-stage +kidney disease needs treatment recommendations. Provide: +1. Comprehensive treatment plan addressing all conditions +2. Medication options with pros/cons for each condition +3. Lifestyle modifications and their expected impact +4. Monitoring schedule and key metrics to track +5. Potential drug interactions and contraindications +6. Expected outcomes and timeline for improvement +7. 
When to consider specialist referrals +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/research_analysis_council.py b/examples/multi_agent/llm_council_examples/research_analysis_council.py new file mode 100644 index 00000000..e276c96b --- /dev/null +++ b/examples/multi_agent/llm_council_examples/research_analysis_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Research Analysis + +This example demonstrates using the LLM Council to conduct comprehensive +research analysis on complex topics. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Research analysis query +query = """ +Conduct a comprehensive analysis of the potential impact of climate change +on global food security over the next 20 years. Include: +1. Key climate factors affecting agriculture (temperature, precipitation, extreme weather) +2. Regional vulnerabilities and impacts on major food-producing regions +3. Crop yield projections and food availability scenarios +4. Economic implications and food price volatility +5. Adaptation strategies and technological solutions +6. Policy recommendations for governments and international organizations +7. Role of innovation in agriculture (precision farming, GMOs, vertical farming) +8. 
Social and geopolitical implications of food insecurity +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/technology_assessment_council.py b/examples/multi_agent/llm_council_examples/technology_assessment_council.py new file mode 100644 index 00000000..72c227a6 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/technology_assessment_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Technology Assessment + +This example demonstrates using the LLM Council to assess emerging technologies +and their business implications. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Technology assessment query +query = """ +Evaluate the business potential and implementation strategy for integrating +quantum computing capabilities into a financial services company. Consider: +1. Current state of quantum computing technology +2. Specific use cases in financial services (risk modeling, portfolio optimization, fraud detection) +3. Competitive advantages and potential ROI +4. Implementation timeline and resource requirements +5. Technical challenges and limitations +6. Risk factors and mitigation strategies +7. Partnership opportunities with quantum computing providers +8. 
Expected timeline for practical business value +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/hiearchical_swarm_example.py b/hiearchical_swarm_example.py index 753ebf0f..a5ed0633 100644 --- a/hiearchical_swarm_example.py +++ b/hiearchical_swarm_example.py @@ -1,5 +1,4 @@ -from swarms.structs.hiearchical_swarm import HierarchicalSwarm -from swarms.structs.agent import Agent +from swarms import Agent, HierarchicalSwarm # Create specialized agents research_agent = Agent( diff --git a/llm_council_example.py b/llm_council_example.py new file mode 100644 index 00000000..078d5360 --- /dev/null +++ b/llm_council_example.py @@ -0,0 +1,23 @@ +from swarms.structs.llm_council import LLMCouncil + +# Example usage of the LLM Council without a function: +# Create the council +council = LLMCouncil(verbose=True) + +# Example query +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + +# Optionally print evaluations +print("\n\n" + "="*80) +print("EVALUATIONS") +print("="*80) +for name, evaluation in result["evaluations"].items(): + print(f"\n{name}:") + print(evaluation[:500] + "..." 
if len(evaluation) > 500 else evaluation) + diff --git a/pyproject.toml b/pyproject.toml index 10ad1565..dceec924 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.3" +version = "8.6.4" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index e0d3430a..6952d2b0 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -11,6 +11,7 @@ from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.conversation import Conversation from swarms.structs.council_as_judge import CouncilAsAJudge from swarms.structs.cron_job import CronJob +from swarms.structs.llm_council import LLMCouncil from swarms.structs.debate_with_judge import DebateWithJudge from swarms.structs.graph_workflow import ( Edge, @@ -161,6 +162,7 @@ __all__ = [ "get_swarms_info", "AutoSwarmBuilder", "CouncilAsAJudge", + "LLMCouncil", "batch_agent_execution", "aggregate", "find_agent_by_name", diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index e693a90c..1bc3dc52 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -679,6 +679,7 @@ class AOP: self.tool_configs: Dict[str, AgentToolConfig] = {} self.task_queues: Dict[str, TaskQueue] = {} self.transport = transport + self.mcp_server = FastMCP( name=server_name, port=port, diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py new file mode 100644 index 00000000..864ec976 --- /dev/null +++ b/swarms/structs/llm_council.py @@ -0,0 +1,459 @@ +""" +LLM Council - A Swarms implementation inspired by Andrej Karpathy's llm-council. + +This implementation creates a council of specialized LLM agents that: +1. Each agent responds to the user query independently +2. All agents review and rank each other's (anonymized) responses +3. 
A Chairman LLM synthesizes all responses and rankings into a final answer + +The council demonstrates how different models evaluate and rank each other's work, +often selecting responses from other models as superior to their own. +""" + +from typing import Dict, List, Optional +import random +from swarms import Agent +from swarms.structs.multi_agent_exec import ( + run_agents_concurrently, + batched_grid_agent_execution, +) + + +def get_gpt_councilor_prompt() -> str: + """ + Get system prompt for GPT-5.1 councilor. + + Returns: + System prompt string for GPT-5.1 councilor agent. + """ + return """You are a member of the LLM Council, representing GPT-5.1. Your role is to provide comprehensive, analytical, and thorough responses to user queries. + +Your strengths: +- Deep analytical thinking and comprehensive coverage +- Ability to break down complex topics into detailed components +- Thorough exploration of multiple perspectives +- Rich contextual understanding + +Your approach: +- Provide detailed, well-structured responses +- Include relevant context and background information +- Consider multiple angles and perspectives +- Be thorough but clear in your explanations + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on quality, depth, and clarity.""" + + +def get_gemini_councilor_prompt() -> str: + """ + Get system prompt for Gemini 3 Pro councilor. + + Returns: + System prompt string for Gemini 3 Pro councilor agent. + """ + return """You are a member of the LLM Council, representing Gemini 3 Pro. Your role is to provide concise, well-processed, and structured responses to user queries. 
+ +Your strengths: +- Clear and structured communication +- Efficient information processing +- Condensed yet comprehensive responses +- Well-organized presentation + +Your approach: +- Provide concise but complete answers +- Structure information clearly and logically +- Focus on key points without unnecessary verbosity +- Present information in an easily digestible format + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on clarity, structure, and efficiency.""" + + +def get_claude_councilor_prompt() -> str: + """ + Get system prompt for Claude Sonnet 4.5 councilor. + + Returns: + System prompt string for Claude Sonnet 4.5 councilor agent. + """ + return """You are a member of the LLM Council, representing Claude Sonnet 4.5. Your role is to provide thoughtful, balanced, and nuanced responses to user queries. + +Your strengths: +- Nuanced understanding and balanced perspectives +- Thoughtful consideration of trade-offs +- Clear reasoning and logical structure +- Ethical and responsible analysis + +Your approach: +- Provide balanced, well-reasoned responses +- Consider multiple viewpoints and implications +- Be thoughtful about potential limitations or edge cases +- Maintain clarity while showing depth of thought + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on thoughtfulness, balance, and nuanced reasoning.""" + + +def get_grok_councilor_prompt() -> str: + """ + Get system prompt for Grok-4 councilor. + + Returns: + System prompt string for Grok-4 councilor agent. + """ + return """You are a member of the LLM Council, representing Grok-4. Your role is to provide creative, innovative, and unique perspectives on user queries. 
+ +Your strengths: +- Creative problem-solving and innovative thinking +- Unique perspectives and out-of-the-box approaches +- Engaging and dynamic communication style +- Ability to connect seemingly unrelated concepts + +Your approach: +- Provide creative and innovative responses +- Offer unique perspectives and fresh insights +- Be engaging and dynamic in your communication +- Think creatively while maintaining accuracy + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on creativity, innovation, and unique insights.""" + + +def get_chairman_prompt() -> str: + """ + Get system prompt for the Chairman agent. + + Returns: + System prompt string for the Chairman agent. + """ + return """You are the Chairman of the LLM Council. Your role is to synthesize responses from all council members along with their evaluations and rankings into a final, comprehensive answer. + +Your responsibilities: +1. Review all council member responses to the user's query +2. Consider the rankings and evaluations provided by each council member +3. Synthesize the best elements from all responses +4. Create a final, comprehensive answer that incorporates the strengths of different approaches +5. Provide transparency about which perspectives influenced the final answer + +Your approach: +- Synthesize rather than simply aggregate +- Identify the strongest elements from each response +- Create a cohesive final answer that benefits from multiple perspectives +- Acknowledge the diversity of approaches taken by council members +- Provide a balanced, comprehensive response that serves the user's needs + +Remember: You have access to all original responses and all evaluations. 
Use this rich context to create the best possible final answer.""" + + +def get_evaluation_prompt(query: str, responses: Dict[str, str], evaluator_name: str) -> str: + """ + Create evaluation prompt for council members to review and rank responses. + + Args: + query: The original user query + responses: Dictionary mapping anonymous IDs to response texts + evaluator_name: Name of the agent doing the evaluation + + Returns: + Formatted evaluation prompt string + """ + responses_text = "\n\n".join([ + f"Response {response_id}:\n{response_text}" + for response_id, response_text in responses.items() + ]) + + return f"""You are evaluating responses from your fellow LLM Council members to the following query: + +QUERY: {query} + +Below are the anonymized responses from all council members (including potentially your own): + +{responses_text} + +Your task: +1. Carefully read and analyze each response +2. Evaluate the quality, accuracy, completeness, and usefulness of each response +3. Rank the responses from best to worst (1 = best, {len(responses)} = worst) +4. Provide brief reasoning for your rankings +5. Be honest and objective - you may find another model's response superior to your own + +Format your evaluation as follows: + +RANKINGS: +1. Response [ID]: [Brief reason why this is the best] +2. Response [ID]: [Brief reason] +... +{len(responses)}. Response [ID]: [Brief reason why this ranks lowest] + +ADDITIONAL OBSERVATIONS: +[Any additional insights about the responses, common themes, strengths/weaknesses, etc.] + +Remember: The goal is honest, objective evaluation. If another model's response is genuinely better, acknowledge it.""" + + +def get_synthesis_prompt( + query: str, + original_responses: Dict[str, str], + evaluations: Dict[str, str], + id_to_member: Dict[str, str] +) -> str: + """ + Create synthesis prompt for the Chairman. 
+ + Args: + query: Original user query + original_responses: Dict mapping member names to their responses + evaluations: Dict mapping evaluator names to their evaluation texts + id_to_member: Mapping from anonymous IDs to member names + + Returns: + Formatted synthesis prompt + """ + responses_section = "\n\n".join([ + f"=== {name} ===\n{response}" + for name, response in original_responses.items() + ]) + + evaluations_section = "\n\n".join([ + f"=== Evaluation by {name} ===\n{evaluation}" + for name, evaluation in evaluations.items() + ]) + + return f"""As the Chairman of the LLM Council, synthesize the following information into a final, comprehensive answer. + +ORIGINAL QUERY: +{query} + +COUNCIL MEMBER RESPONSES: +{responses_section} + +COUNCIL MEMBER EVALUATIONS AND RANKINGS: +{evaluations_section} + +ANONYMOUS ID MAPPING (for reference): +{chr(10).join([f" {aid} = {name}" for aid, name in id_to_member.items()])} + +Your task: +1. Review all council member responses +2. Consider the evaluations and rankings provided by each member +3. Identify the strongest elements from each response +4. Synthesize a final, comprehensive answer that: + - Incorporates the best insights from multiple perspectives + - Addresses the query thoroughly and accurately + - Benefits from the diversity of approaches taken + - Is clear, well-structured, and useful + +Provide your final synthesized response below. You may reference which perspectives or approaches influenced different parts of your answer.""" + + +class LLMCouncil: + """ + An LLM Council that orchestrates multiple specialized agents to collaboratively + answer queries through independent responses, peer review, and synthesis. + + The council follows this workflow: + 1. Dispatch query to all council members in parallel + 2. Collect all responses (anonymized) + 3. Have each member review and rank all responses + 4. 
Chairman synthesizes everything into final response + """ + + def __init__( + self, + council_members: Optional[List[Agent]] = None, + chairman_model: str = "gpt-5.1", + verbose: bool = True, + ): + """ + Initialize the LLM Council. + + Args: + council_members: List of Agent instances representing council members. + If None, creates default council with GPT-5.1, Gemini 3 Pro, + Claude Sonnet 4.5, and Grok-4. + chairman_model: Model name for the Chairman agent that synthesizes responses. + verbose: Whether to print progress and intermediate results. + """ + self.verbose = verbose + + # Create default council members if none provided + if council_members is None: + self.council_members = self._create_default_council() + else: + self.council_members = council_members + + # Create Chairman agent + self.chairman = Agent( + agent_name="Chairman", + agent_description="Chairman of the LLM Council, responsible for synthesizing all responses and rankings into a final answer", + system_prompt=get_chairman_prompt(), + model_name=chairman_model, + max_loops=1, + verbose=verbose, + temperature=0.7, + ) + + if self.verbose: + print(f"πŸ›οΈ LLM Council initialized with {len(self.council_members)} members") + for i, member in enumerate(self.council_members, 1): + print(f" {i}. {member.agent_name} ({member.model_name})") + + def _create_default_council(self) -> List[Agent]: + """ + Create default council members with specialized prompts and models. + + Returns: + List of Agent instances configured as council members. 
+ """ + + # GPT-5.1 Agent - Analytical and comprehensive + gpt_agent = Agent( + agent_name="GPT-5.1-Councilor", + agent_description="Analytical and comprehensive AI councilor specializing in deep analysis and thorough responses", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-5.1", + max_loops=1, + verbose=False, + temperature=0.7, + ) + + # Gemini 3 Pro Agent - Concise and processed + gemini_agent = Agent( + agent_name="Gemini-3-Pro-Councilor", + agent_description="Concise and well-processed AI councilor specializing in clear, structured responses", + system_prompt=get_gemini_councilor_prompt(), + model_name="gemini-2.5-flash", # Using available Gemini model + max_loops=1, + verbose=False, + temperature=0.7, + ) + + # Claude Sonnet 4.5 Agent - Balanced and thoughtful + claude_agent = Agent( + agent_name="Claude-Sonnet-4.5-Councilor", + agent_description="Thoughtful and balanced AI councilor specializing in nuanced and well-reasoned responses", + system_prompt=get_claude_councilor_prompt(), + model_name="anthropic/claude-sonnet-4-5", # Using available Claude model + max_loops=1, + verbose=False, + temperature=0.0, + top_p=None, + ) + + # Grok-4 Agent - Creative and innovative + grok_agent = Agent( + agent_name="Grok-4-Councilor", + agent_description="Creative and innovative AI councilor specializing in unique perspectives and creative solutions", + system_prompt=get_grok_councilor_prompt(), + model_name="x-ai/grok-4", # Using available model as proxy for Grok-4 + max_loops=1, + verbose=False, + temperature=0.8, + ) + + members = [gpt_agent, gemini_agent, claude_agent, grok_agent] + + return members + + def run(self, query: str) -> Dict: + """ + Execute the full LLM Council workflow. 
+ + Args: + query: The user's query to process + + Returns: + Dictionary containing: + - original_responses: Dict mapping member names to their responses + - evaluations: Dict mapping evaluator names to their rankings + - final_response: The Chairman's synthesized final answer + """ + if self.verbose: + print(f"\n{'='*80}") + print("πŸ›οΈ LLM COUNCIL SESSION") + print("="*80) + print(f"\nπŸ“ Query: {query}\n") + + # Step 1: Get responses from all council members in parallel + if self.verbose: + print("πŸ“€ Dispatching query to all council members...") + + results_dict = run_agents_concurrently( + self.council_members, + task=query, + return_agent_output_dict=True + ) + + # Map results to member names + original_responses = { + member.agent_name: response + for member, response in zip(self.council_members, + [results_dict.get(member.agent_name, "") + for member in self.council_members]) + } + + if self.verbose: + print(f"βœ… Received {len(original_responses)} responses\n") + for name, response in original_responses.items(): + print(f" {name}: {response[:100]}...") + + # Step 2: Anonymize responses for evaluation + # Create anonymous IDs (A, B, C, D, etc.) 
+ anonymous_ids = [chr(65 + i) for i in range(len(self.council_members))] + random.shuffle(anonymous_ids) # Shuffle to ensure anonymity + + anonymous_responses = { + anonymous_ids[i]: original_responses[member.agent_name] + for i, member in enumerate(self.council_members) + } + + # Create mapping from anonymous ID to member name (for later reference) + id_to_member = { + anonymous_ids[i]: member.agent_name + for i, member in enumerate(self.council_members) + } + + if self.verbose: + print("\nπŸ” Council members evaluating each other's responses...") + + # Step 3: Have each member evaluate and rank all responses concurrently + # Create evaluation tasks for each member + evaluation_tasks = [ + get_evaluation_prompt(query, anonymous_responses, member.agent_name) + for member in self.council_members + ] + + # Run evaluations concurrently using batched_grid_agent_execution + evaluation_results = batched_grid_agent_execution( + self.council_members, + evaluation_tasks + ) + + # Map results to member names + evaluations = { + member.agent_name: evaluation_results[i] + for i, member in enumerate(self.council_members) + } + + if self.verbose: + print(f"βœ… Received {len(evaluations)} evaluations\n") + + # Step 4: Chairman synthesizes everything + if self.verbose: + print("πŸ‘” Chairman synthesizing final response...\n") + + synthesis_prompt = get_synthesis_prompt( + query, original_responses, evaluations, id_to_member + ) + + final_response = self.chairman.run(task=synthesis_prompt) + + if self.verbose: + print(f"{'='*80}") + print("βœ… FINAL RESPONSE") + print(f"{'='*80}\n") + + return { + "query": query, + "original_responses": original_responses, + "evaluations": evaluations, + "final_response": final_response, + "anonymous_mapping": id_to_member, + } + From 31e304305920bdbd1028f2cd3834df2d6e5079fa Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sat, 22 Nov 2025 21:48:11 -0800 Subject: [PATCH 33/42] [LLMCouncil][Fix import issue with agent] --- llm_council_example.py | 4 
---- pyproject.toml | 2 +- swarms/structs/llm_council.py | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/llm_council_example.py b/llm_council_example.py index 078d5360..26f4bfec 100644 --- a/llm_council_example.py +++ b/llm_council_example.py @@ -1,6 +1,5 @@ from swarms.structs.llm_council import LLMCouncil -# Example usage of the LLM Council without a function: # Create the council council = LLMCouncil(verbose=True) @@ -14,9 +13,6 @@ result = council.run(query) print(result["final_response"]) # Optionally print evaluations -print("\n\n" + "="*80) -print("EVALUATIONS") -print("="*80) for name, evaluation in result["evaluations"].items(): print(f"\n{name}:") print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation) diff --git a/pyproject.toml b/pyproject.toml index dceec924..0336f41f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.4" +version = "8.6.5" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index 864ec976..b422136b 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -12,7 +12,7 @@ often selecting responses from other models as superior to their own. 
from typing import Dict, List, Optional import random -from swarms import Agent +from swarms.structs.agent import Agent from swarms.structs.multi_agent_exec import ( run_agents_concurrently, batched_grid_agent_execution, From 74f7bcd2b7f93822e6351d81db605ff86b8aaa17 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 23 Nov 2025 23:27:10 -0800 Subject: [PATCH 34/42] [Improve LLMCouncil] [Improved docs] --- docs/swarms/structs/llm_council.md | 244 ++++++++++++------ .../business_strategy_council.py | 1 - .../etf_stock_analysis_council.py | 1 - .../finance_analysis_council.py | 1 - .../legal_analysis_council.py | 1 - .../marketing_strategy_council.py | 1 - .../medical_diagnosis_council.py | 1 - .../medical_treatment_council.py | 1 - .../research_analysis_council.py | 1 - .../technology_assessment_council.py | 1 - llm_council_example.py | 7 +- swarms/agents/reasoning_agents.py | 4 +- swarms/agents/reasoning_duo.py | 4 +- swarms/structs/aop.py | 2 +- swarms/structs/llm_council.py | 215 +++++++++------ tests/structs/test_auto_swarms_builder.py | 4 +- tests/structs/test_i_agent.py | 2 - tests/structs/test_sequential_workflow.py | 1 - 18 files changed, 299 insertions(+), 193 deletions(-) diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md index 6352bcef..0f83b0d9 100644 --- a/docs/swarms/structs/llm_council.md +++ b/docs/swarms/structs/llm_council.md @@ -2,61 +2,35 @@ ```mermaid flowchart TD - A[User Query] --> B[LLM Council Initialization] - B --> C{Council Members Provided?} - C -->|No| D[Create Default Council] - C -->|Yes| E[Use Provided Members] - D --> F[Step 1: Parallel Response Generation] - E --> F + A[User Query] --> B[Council Members] - subgraph "Default Council Members" - G1[GPT-5.1-Councilor
Analytical & Comprehensive] - G2[Gemini-3-Pro-Councilor
Concise & Structured] - G3[Claude-Sonnet-4.5-Councilor
Thoughtful & Balanced] - G4[Grok-4-Councilor
Creative & Innovative] + subgraph "Council Members" + C1[GPT-5.1-Councilor] + C2[Gemini-3-Pro-Councilor] + C3[Claude-Sonnet-4.5-Councilor] + C4[Grok-4-Councilor] end - F --> G1 - F --> G2 - F --> G3 - F --> G4 + B --> C1 + B --> C2 + B --> C3 + B --> C4 - G1 --> H[Collect All Responses] - G2 --> H - G3 --> H - G4 --> H + C1 --> D[Responses] + C2 --> D + C3 --> D + C4 --> D - H --> I[Step 2: Anonymize Responses] - I --> J[Assign Anonymous IDs: A, B, C, D...] - - J --> K[Step 3: Parallel Evaluation] - - subgraph "Evaluation Phase" - K --> L1[Member 1 Evaluates All] - K --> L2[Member 2 Evaluates All] - K --> L3[Member 3 Evaluates All] - K --> L4[Member 4 Evaluates All] - end - - L1 --> M[Collect Evaluations & Rankings] - L2 --> M - L3 --> M - L4 --> M - - M --> N[Step 4: Chairman Synthesis] - N --> O[Chairman Agent] - O --> P[Final Synthesized Response] - - P --> Q[Return Results Dictionary] - - style A fill:#e1f5ff - style P fill:#c8e6c9 - style Q fill:#c8e6c9 - style O fill:#fff9c4 + D --> E[Anonymize & Evaluate] + E --> F[Chairman Synthesis] + F --> G[Final Response] + ``` The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own. +The class automatically tracks all agent messages in a `Conversation` object and formats output using `history_output_formatter`, providing flexible output formats including dictionaries, lists, strings, JSON, YAML, and more. 
+ ## Workflow Overview The LLM Council follows a four-step process: @@ -80,6 +54,8 @@ class LLMCouncil: |-----------|------|-------------|---------| | `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) | | `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization | +| `conversation` | `Conversation` | Conversation object tracking all messages throughout the workflow | Created during initialization | +| `output_type` | `HistoryOutputType` | Format for the output (e.g., "dict", "list", "string", "json", "yaml") | `"dict"` | | `verbose` | `bool` | Whether to print progress and intermediate results | `True` | ## Methods @@ -92,9 +68,13 @@ Initializes the LLM Council with council members and a Chairman agent. | Parameter | Type | Default | Description | |-----------|------|---------|-------------| +| `id` | `str` | `swarm_id()` | Unique identifier for the council instance. | +| `name` | `str` | `"LLM Council"` | Name of the council instance. | +| `description` | `str` | `"A collaborative council..."` | Description of the council's purpose. | | `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. | | `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. | | `verbose` | `bool` | `True` | Whether to print progress and intermediate results. | +| `output_type` | `HistoryOutputType` | `"dict"` | Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", "xml", "dict-all-except-first", "str-all-except-first", "dict-final", "list-final". | #### Returns @@ -105,12 +85,13 @@ Initializes the LLM Council with council members and a Chairman agent. #### Description Creates an LLM Council instance with specialized council members. 
If no members are provided, it creates a default council consisting of: + - **GPT-5.1-Councilor**: Analytical and comprehensive responses - **Gemini-3-Pro-Councilor**: Concise and well-processed responses - **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses - **Grok-4-Councilor**: Creative and innovative responses -The Chairman agent is automatically created with a specialized prompt for synthesizing responses. +The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis. #### Example Usage @@ -120,7 +101,7 @@ from swarms.structs.llm_council import LLMCouncil # Create council with default members council = LLMCouncil(verbose=True) -# Create council with custom members +# Create council with custom members and output format from swarms import Agent custom_members = [ Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1), @@ -129,7 +110,8 @@ custom_members = [ council = LLMCouncil( council_members=custom_members, chairman_model="gpt-4", - verbose=True + verbose=True, + output_type="json" # Output as JSON string ) ``` @@ -137,7 +119,7 @@ council = LLMCouncil( ### `run` -Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. +Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. All messages are tracked in the conversation object and formatted according to the `output_type` setting. #### Parameters @@ -149,54 +131,79 @@ Executes the full LLM Council workflow: parallel responses, anonymization, peer | Type | Description | |------|-------------| -| `Dict` | Dictionary containing the following keys: | +| `Union[List, Dict, str]` | Formatted output based on `output_type`. 
The output contains the conversation history with all messages tracked throughout the workflow. | -#### Return Dictionary Structure +#### Output Format -| Key | Type | Description | -|-----|------|-------------| -| `query` | `str` | The original user query. | -| `original_responses` | `Dict[str, str]` | Dictionary mapping council member names to their original responses. | -| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts (rankings and reasoning). | -| `final_response` | `str` | The Chairman's synthesized final answer combining all perspectives. | -| `anonymous_mapping` | `Dict[str, str]` | Mapping from anonymous IDs (A, B, C, D) to member names for reference. | +The return value depends on the `output_type` parameter set during initialization: + +- **`"dict"`** (default): Returns conversation as a dictionary/list of message dictionaries +- **`"list"`**: Returns conversation as a list of formatted strings (`"role: content"`) +- **`"string"`** or **`"str"`**: Returns conversation as a formatted string +- **`"final"`** or **`"last"`**: Returns only the content of the final message (Chairman's response) +- **`"json"`**: Returns conversation as a JSON string +- **`"yaml"`**: Returns conversation as a YAML string +- **`"xml"`**: Returns conversation as an XML string +- **`"dict-all-except-first"`**: Returns all messages except the first as a dictionary +- **`"str-all-except-first"`**: Returns all messages except the first as a string +- **`"dict-final"`**: Returns the final message as a dictionary +- **`"list-final"`**: Returns the final message as a list + +#### Conversation Tracking + +All messages are automatically tracked in the conversation object with the following roles: + +- **`"User"`**: The original user query +- **`"{member_name}"`**: Each council member's response (e.g., "GPT-5.1-Councilor") +- **`"{member_name}-Evaluation"`**: Each council member's evaluation (e.g., "GPT-5.1-Councilor-Evaluation") +- 
**`"Chairman"`**: The final synthesized response #### Description Executes the complete LLM Council workflow: -1. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` -2. **Collection Phase**: Collects all responses and maps them to member names -3. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity -4. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution` -5. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer +1. **User Query Tracking**: Adds the user query to the conversation as "User" role +2. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` +3. **Collection Phase**: Collects all responses, maps them to member names, and adds each to the conversation with the member's name as the role +4. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity +5. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution`, then adds evaluations to the conversation with "{member_name}-Evaluation" as the role +6. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer, which is added to the conversation as "Chairman" role +7. **Output Formatting**: Returns the conversation formatted according to the `output_type` setting using `history_output_formatter` -The method provides verbose output by default, showing progress at each stage. +The method provides verbose output by default, showing progress at each stage. All messages are tracked in the `conversation` attribute for later access or export. 
#### Example Usage ```python from swarms.structs.llm_council import LLMCouncil +# Create council with default output format (dict) council = LLMCouncil(verbose=True) query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" +# Run the council - returns formatted conversation based on output_type result = council.run(query) -# Access the final synthesized response -print(result["final_response"]) +# With default "dict" output_type, result is a list of message dictionaries +# Access conversation messages +for message in result: + print(f"{message['role']}: {message['content'][:200]}...") + +# Access the conversation object directly for more control +conversation = council.conversation +print("\nFinal message:", conversation.get_final_message_content()) -# Access individual member responses -for name, response in result["original_responses"].items(): - print(f"{name}: {response[:200]}...") +# Get conversation as string +print("\nFull conversation:") +print(conversation.get_str()) -# Access evaluation rankings -for evaluator, evaluation in result["evaluations"].items(): - print(f"{evaluator} evaluation:\n{evaluation[:300]}...") +# Example with different output types +council_json = LLMCouncil(output_type="json", verbose=False) +result_json = council_json.run(query) # Returns JSON string -# Check anonymous mapping -print("Anonymous IDs:", result["anonymous_mapping"]) +council_final = LLMCouncil(output_type="final", verbose=False) +result_final = council_final.run(query) # Returns only final response string ``` --- @@ -225,6 +232,7 @@ Internal method that creates the default council configuration with four special - **Grok-4-Councilor** (`model_name="x-ai/grok-4"`): Creative and innovative, temperature=0.8 Each agent is configured with: + - Specialized system prompts matching their role - `max_loops=1` for single-response generation - `verbose=False` to reduce noise during parallel execution @@ -367,25 +375,40 @@ For 
comprehensive examples demonstrating various use cases, see the [LLM Council ```python from swarms.structs.llm_council import LLMCouncil -# Create the council +# Create the council with default output format council = LLMCouncil(verbose=True) # Example query query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" -# Run the council +# Run the council - returns formatted conversation result = council.run(query) -# Print final response -print(result["final_response"]) +# With default "dict" output_type, result is a list of message dictionaries +# Print all messages +for message in result: + role = message['role'] + content = message['content'] + print(f"\n{role}:") + print(content[:500] + "..." if len(content) > 500 else content) -# Optionally print evaluations -print("\n\n" + "="*80) -print("EVALUATIONS") +# Access conversation object directly for more options +conversation = council.conversation + +# Get only the final response +print("\n" + "="*80) +print("FINAL RESPONSE") print("="*80) -for name, evaluation in result["evaluations"].items(): - print(f"\n{name}:") - print(evaluation[:500] + "..." 
if len(evaluation) > 500 else evaluation) +print(conversation.get_final_message_content()) + +# Get conversation as formatted string +print("\n" + "="*80) +print("FULL CONVERSATION") +print("="*80) +print(conversation.get_str()) + +# Export conversation to JSON +conversation.export() ``` ## Customization @@ -428,6 +451,50 @@ council = LLMCouncil( ) ``` +### Custom Output Format + +You can control the output format using the `output_type` parameter: + +```python +# Get output as JSON string +council = LLMCouncil(output_type="json") +result = council.run(query) # Returns JSON string + +# Get only the final response +council = LLMCouncil(output_type="final") +result = council.run(query) # Returns only final response string + +# Get as YAML +council = LLMCouncil(output_type="yaml") +result = council.run(query) # Returns YAML string + +# Get as formatted string +council = LLMCouncil(output_type="string") +result = council.run(query) # Returns formatted conversation string +``` + +### Accessing Conversation History + +The conversation object is accessible for advanced usage: + +```python +council = LLMCouncil() +council.run(query) + +# Access conversation directly +conversation = council.conversation + +# Get conversation history +history = conversation.conversation_history + +# Export to file +conversation.export() # Saves to default location + +# Get specific format +json_output = conversation.to_json() +yaml_output = conversation.return_messages_as_dictionary() +``` + ## Architecture Benefits 1. **Diversity**: Multiple models provide varied perspectives and approaches @@ -436,6 +503,8 @@ council = LLMCouncil( 4. **Transparency**: Full visibility into individual responses and evaluation rankings 5. **Scalability**: Easy to add or remove council members 6. **Flexibility**: Supports custom agents and models +7. **Conversation Tracking**: All messages are automatically tracked in a Conversation object for history and export +8. 
**Flexible Output**: Multiple output formats supported via `history_output_formatter` (dict, list, string, JSON, YAML, XML, etc.) ## Performance Considerations @@ -443,11 +512,14 @@ council = LLMCouncil( - **Anonymization**: Responses are anonymized to prevent bias in evaluation - **Model Selection**: Different models can be used for different roles based on their strengths - **Verbose Mode**: Can be disabled for production use to reduce output +- **Conversation Management**: Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files +- **Output Formatting**: Choose lightweight output formats (e.g., "final") for production to reduce memory usage ## Related Documentation - [Multi-Agent Architectures Overview](overview.md) - [Council of Judges](council_of_judges.md) - Similar peer review pattern - [Agent Class Reference](agent.md) - Understanding individual agents +- [Conversation Class Reference](conversation.md) - Understanding conversation tracking and management - [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods - +- [History Output Formatter](../../../swarms/utils/history_output_formatter.py) - Output formatting utilities diff --git a/examples/multi_agent/llm_council_examples/business_strategy_council.py b/examples/multi_agent/llm_council_examples/business_strategy_council.py index bacc8995..10b5087b 100644 --- a/examples/multi_agent/llm_council_examples/business_strategy_council.py +++ b/examples/multi_agent/llm_council_examples/business_strategy_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py index b69ffb70..7e85d851 100644 --- a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py +++ 
b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py @@ -27,4 +27,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/finance_analysis_council.py b/examples/multi_agent/llm_council_examples/finance_analysis_council.py index d1f4c9a5..f014be47 100644 --- a/examples/multi_agent/llm_council_examples/finance_analysis_council.py +++ b/examples/multi_agent/llm_council_examples/finance_analysis_council.py @@ -27,4 +27,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/legal_analysis_council.py b/examples/multi_agent/llm_council_examples/legal_analysis_council.py index 01bdcdc8..5ea3481e 100644 --- a/examples/multi_agent/llm_council_examples/legal_analysis_council.py +++ b/examples/multi_agent/llm_council_examples/legal_analysis_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py index b033d982..a799c364 100644 --- a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py +++ b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py @@ -26,4 +26,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py index f143945c..90532f38 100644 --- a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +++ b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py @@ -34,4 +34,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git 
a/examples/multi_agent/llm_council_examples/medical_treatment_council.py b/examples/multi_agent/llm_council_examples/medical_treatment_council.py index cd828f1d..6084db4c 100644 --- a/examples/multi_agent/llm_council_examples/medical_treatment_council.py +++ b/examples/multi_agent/llm_council_examples/medical_treatment_council.py @@ -28,4 +28,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/research_analysis_council.py b/examples/multi_agent/llm_council_examples/research_analysis_council.py index e276c96b..74a8585a 100644 --- a/examples/multi_agent/llm_council_examples/research_analysis_council.py +++ b/examples/multi_agent/llm_council_examples/research_analysis_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/technology_assessment_council.py b/examples/multi_agent/llm_council_examples/technology_assessment_council.py index 72c227a6..4db4dd95 100644 --- a/examples/multi_agent/llm_council_examples/technology_assessment_council.py +++ b/examples/multi_agent/llm_council_examples/technology_assessment_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/llm_council_example.py b/llm_council_example.py index 26f4bfec..1cc415d0 100644 --- a/llm_council_example.py +++ b/llm_council_example.py @@ -15,5 +15,8 @@ print(result["final_response"]) # Optionally print evaluations for name, evaluation in result["evaluations"].items(): print(f"\n{name}:") - print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation) - + print( + evaluation[:500] + "..." 
+ if len(evaluation) > 500 + else evaluation + ) diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index 749002db..e64ab828 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -88,9 +88,7 @@ class ReasoningAgentRouter: eval: bool = False, random_models_on: bool = False, majority_voting_prompt: Optional[str] = None, - reasoning_model_name: Optional[ - str - ] = "gpt-4o", + reasoning_model_name: Optional[str] = "gpt-4o", ): """ Initialize the ReasoningAgentRouter with the specified configuration. diff --git a/swarms/agents/reasoning_duo.py b/swarms/agents/reasoning_duo.py index 81fa0310..581a69e7 100644 --- a/swarms/agents/reasoning_duo.py +++ b/swarms/agents/reasoning_duo.py @@ -35,9 +35,7 @@ class ReasoningDuo: model_names: list[str] = ["gpt-4o-mini", "gpt-4.1"], system_prompt: str = "You are a helpful assistant that can answer questions and help with tasks.", output_type: OutputType = "dict-all-except-first", - reasoning_model_name: Optional[ - str - ] = "gpt-4o", + reasoning_model_name: Optional[str] = "gpt-4o", max_loops: int = 1, *args, **kwargs, diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index 1bc3dc52..141dfe62 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -679,7 +679,7 @@ class AOP: self.tool_configs: Dict[str, AgentToolConfig] = {} self.task_queues: Dict[str, TaskQueue] = {} self.transport = transport - + self.mcp_server = FastMCP( name=server_name, port=port, diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index b422136b..fa2a6ab5 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -17,12 +17,14 @@ from swarms.structs.multi_agent_exec import ( run_agents_concurrently, batched_grid_agent_execution, ) - +from swarms.utils.history_output_formatter import HistoryOutputType, history_output_formatter +from swarms.structs.conversation import Conversation +from swarms.structs.swarm_id import swarm_id 
def get_gpt_councilor_prompt() -> str: """ Get system prompt for GPT-5.1 councilor. - + Returns: System prompt string for GPT-5.1 councilor agent. """ @@ -46,7 +48,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_gemini_councilor_prompt() -> str: """ Get system prompt for Gemini 3 Pro councilor. - + Returns: System prompt string for Gemini 3 Pro councilor agent. """ @@ -70,7 +72,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_claude_councilor_prompt() -> str: """ Get system prompt for Claude Sonnet 4.5 councilor. - + Returns: System prompt string for Claude Sonnet 4.5 councilor agent. """ @@ -94,7 +96,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_grok_councilor_prompt() -> str: """ Get system prompt for Grok-4 councilor. - + Returns: System prompt string for Grok-4 councilor agent. """ @@ -118,7 +120,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_chairman_prompt() -> str: """ Get system prompt for the Chairman agent. - + Returns: System prompt string for the Chairman agent. """ @@ -141,23 +143,27 @@ Your approach: Remember: You have access to all original responses and all evaluations. Use this rich context to create the best possible final answer.""" -def get_evaluation_prompt(query: str, responses: Dict[str, str], evaluator_name: str) -> str: +def get_evaluation_prompt( + query: str, responses: Dict[str, str], evaluator_name: str +) -> str: """ Create evaluation prompt for council members to review and rank responses. 
- + Args: query: The original user query responses: Dictionary mapping anonymous IDs to response texts evaluator_name: Name of the agent doing the evaluation - + Returns: Formatted evaluation prompt string """ - responses_text = "\n\n".join([ - f"Response {response_id}:\n{response_text}" - for response_id, response_text in responses.items() - ]) - + responses_text = "\n\n".join( + [ + f"Response {response_id}:\n{response_text}" + for response_id, response_text in responses.items() + ] + ) + return f"""You are evaluating responses from your fellow LLM Council members to the following query: QUERY: {query} @@ -191,30 +197,34 @@ def get_synthesis_prompt( query: str, original_responses: Dict[str, str], evaluations: Dict[str, str], - id_to_member: Dict[str, str] + id_to_member: Dict[str, str], ) -> str: """ Create synthesis prompt for the Chairman. - + Args: query: Original user query original_responses: Dict mapping member names to their responses evaluations: Dict mapping evaluator names to their evaluation texts id_to_member: Mapping from anonymous IDs to member names - + Returns: Formatted synthesis prompt """ - responses_section = "\n\n".join([ - f"=== {name} ===\n{response}" - for name, response in original_responses.items() - ]) - - evaluations_section = "\n\n".join([ - f"=== Evaluation by {name} ===\n{evaluation}" - for name, evaluation in evaluations.items() - ]) - + responses_section = "\n\n".join( + [ + f"=== {name} ===\n{response}" + for name, response in original_responses.items() + ] + ) + + evaluations_section = "\n\n".join( + [ + f"=== Evaluation by {name} ===\n{evaluation}" + for name, evaluation in evaluations.items() + ] + ) + return f"""As the Chairman of the LLM Council, synthesize the following information into a final, comprehensive answer. 
ORIGINAL QUERY: @@ -246,38 +256,46 @@ class LLMCouncil: """ An LLM Council that orchestrates multiple specialized agents to collaboratively answer queries through independent responses, peer review, and synthesis. - + The council follows this workflow: 1. Dispatch query to all council members in parallel 2. Collect all responses (anonymized) 3. Have each member review and rank all responses 4. Chairman synthesizes everything into final response """ - + def __init__( self, + id: str = swarm_id(), + name: str = "LLM Council", + description: str = "A collaborative council of LLM agents where each member independently answers a query, reviews and ranks anonymized peer responses, and a chairman synthesizes the best elements into a final answer.", council_members: Optional[List[Agent]] = None, chairman_model: str = "gpt-5.1", verbose: bool = True, + output_type: HistoryOutputType = "dict", ): """ Initialize the LLM Council. - + Args: council_members: List of Agent instances representing council members. If None, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. chairman_model: Model name for the Chairman agent that synthesizes responses. verbose: Whether to print progress and intermediate results. + output_type: Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", etc. 
""" + self.name = name + self.description = description self.verbose = verbose - + self.output_type = output_type + # Create default council members if none provided if council_members is None: self.council_members = self._create_default_council() else: self.council_members = council_members - + # Create Chairman agent self.chairman = Agent( agent_name="Chairman", @@ -289,19 +307,25 @@ class LLMCouncil: temperature=0.7, ) + self.conversation = Conversation(name=f"[LLM Council] [Conversation][{name}]") + if self.verbose: - print(f"πŸ›οΈ LLM Council initialized with {len(self.council_members)} members") + print( + f"πŸ›οΈ LLM Council initialized with {len(self.council_members)} members" + ) for i, member in enumerate(self.council_members, 1): - print(f" {i}. {member.agent_name} ({member.model_name})") - + print( + f" {i}. {member.agent_name} ({member.model_name})" + ) + def _create_default_council(self) -> List[Agent]: """ Create default council members with specialized prompts and models. - + Returns: List of Agent instances configured as council members. """ - + # GPT-5.1 Agent - Analytical and comprehensive gpt_agent = Agent( agent_name="GPT-5.1-Councilor", @@ -312,7 +336,7 @@ class LLMCouncil: verbose=False, temperature=0.7, ) - + # Gemini 3 Pro Agent - Concise and processed gemini_agent = Agent( agent_name="Gemini-3-Pro-Councilor", @@ -323,7 +347,7 @@ class LLMCouncil: verbose=False, temperature=0.7, ) - + # Claude Sonnet 4.5 Agent - Balanced and thoughtful claude_agent = Agent( agent_name="Claude-Sonnet-4.5-Councilor", @@ -335,7 +359,7 @@ class LLMCouncil: temperature=0.0, top_p=None, ) - + # Grok-4 Agent - Creative and innovative grok_agent = Agent( agent_name="Grok-4-Councilor", @@ -346,114 +370,135 @@ class LLMCouncil: verbose=False, temperature=0.8, ) - + members = [gpt_agent, gemini_agent, claude_agent, grok_agent] - + return members - - def run(self, query: str) -> Dict: + + def run(self, query: str): """ Execute the full LLM Council workflow. 
- + Args: query: The user's query to process - + Returns: - Dictionary containing: - - original_responses: Dict mapping member names to their responses - - evaluations: Dict mapping evaluator names to their rankings - - final_response: The Chairman's synthesized final answer + Formatted output based on output_type, containing conversation history + with all council member responses, evaluations, and final synthesis. """ if self.verbose: print(f"\n{'='*80}") print("πŸ›οΈ LLM COUNCIL SESSION") - print("="*80) + print("=" * 80) print(f"\nπŸ“ Query: {query}\n") - + + # Add user query to conversation + self.conversation.add(role="User", content=query) + # Step 1: Get responses from all council members in parallel if self.verbose: print("πŸ“€ Dispatching query to all council members...") - + results_dict = run_agents_concurrently( self.council_members, task=query, - return_agent_output_dict=True + return_agent_output_dict=True, ) - + # Map results to member names original_responses = { member.agent_name: response - for member, response in zip(self.council_members, - [results_dict.get(member.agent_name, "") - for member in self.council_members]) + for member, response in zip( + self.council_members, + [ + results_dict.get(member.agent_name, "") + for member in self.council_members + ], + ) } - + + # Add each council member's response to conversation + for member_name, response in original_responses.items(): + self.conversation.add(role=member_name, content=response) + if self.verbose: - print(f"βœ… Received {len(original_responses)} responses\n") + print( + f"βœ… Received {len(original_responses)} responses\n" + ) for name, response in original_responses.items(): print(f" {name}: {response[:100]}...") - + # Step 2: Anonymize responses for evaluation # Create anonymous IDs (A, B, C, D, etc.) 
- anonymous_ids = [chr(65 + i) for i in range(len(self.council_members))] + anonymous_ids = [ + chr(65 + i) for i in range(len(self.council_members)) + ] random.shuffle(anonymous_ids) # Shuffle to ensure anonymity - + anonymous_responses = { anonymous_ids[i]: original_responses[member.agent_name] for i, member in enumerate(self.council_members) } - + # Create mapping from anonymous ID to member name (for later reference) id_to_member = { anonymous_ids[i]: member.agent_name for i, member in enumerate(self.council_members) } - + if self.verbose: - print("\nπŸ” Council members evaluating each other's responses...") - + print( + "\nπŸ” Council members evaluating each other's responses..." + ) + # Step 3: Have each member evaluate and rank all responses concurrently # Create evaluation tasks for each member evaluation_tasks = [ - get_evaluation_prompt(query, anonymous_responses, member.agent_name) + get_evaluation_prompt( + query, anonymous_responses, member.agent_name + ) for member in self.council_members ] - + # Run evaluations concurrently using batched_grid_agent_execution evaluation_results = batched_grid_agent_execution( - self.council_members, - evaluation_tasks + self.council_members, evaluation_tasks ) - + # Map results to member names evaluations = { member.agent_name: evaluation_results[i] for i, member in enumerate(self.council_members) } - + + # Add each council member's evaluation to conversation + for member_name, evaluation in evaluations.items(): + self.conversation.add( + role=f"{member_name}-Evaluation", content=evaluation + ) + if self.verbose: print(f"βœ… Received {len(evaluations)} evaluations\n") - + # Step 4: Chairman synthesizes everything if self.verbose: print("πŸ‘” Chairman synthesizing final response...\n") - + synthesis_prompt = get_synthesis_prompt( query, original_responses, evaluations, id_to_member ) - + final_response = self.chairman.run(task=synthesis_prompt) - + + # Add chairman's final response to conversation + 
self.conversation.add(role="Chairman", content=final_response) + if self.verbose: print(f"{'='*80}") print("βœ… FINAL RESPONSE") print(f"{'='*80}\n") - - return { - "query": query, - "original_responses": original_responses, - "evaluations": evaluations, - "final_response": final_response, - "anonymous_mapping": id_to_member, - } + # Format and return output using history_output_formatter + return history_output_formatter( + conversation=self.conversation, type=self.output_type + ) diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py index 768256e1..1d6e8762 100644 --- a/tests/structs/test_auto_swarms_builder.py +++ b/tests/structs/test_auto_swarms_builder.py @@ -168,7 +168,9 @@ def test_error_handling(): # Test with invalid agent configuration print("Testing invalid agent configuration...") try: - swarm.create_agents_from_specs({"agents": [{"agent_name": ""}]}) + swarm.create_agents_from_specs( + {"agents": [{"agent_name": ""}]} + ) print( "βœ— Should have raised an error for empty agent configuration" ) diff --git a/tests/structs/test_i_agent.py b/tests/structs/test_i_agent.py index 3edf9a8e..1c1f95c5 100644 --- a/tests/structs/test_i_agent.py +++ b/tests/structs/test_i_agent.py @@ -1,5 +1,3 @@ -import pytest - from swarms.agents.i_agent import IterativeReflectiveExpansion diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index e4a48a20..99dd73ae 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -3,7 +3,6 @@ import pytest from swarms import Agent, SequentialWorkflow - def test_sequential_workflow_initialization_with_agents(): """Test SequentialWorkflow initialization with agents""" agent1 = Agent( From f703f2525b4d9c123a39df4b01f397fbe1d214d9 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 23 Nov 2025 23:28:54 -0800 Subject: [PATCH 35/42] Improved LLM Council docs --- docs/swarms/structs/llm_council.md | 61 
+++++++++++++++++------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md index 0f83b0d9..e1092bb4 100644 --- a/docs/swarms/structs/llm_council.md +++ b/docs/swarms/structs/llm_council.md @@ -86,10 +86,12 @@ Initializes the LLM Council with council members and a Chairman agent. Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of: -- **GPT-5.1-Councilor**: Analytical and comprehensive responses -- **Gemini-3-Pro-Councilor**: Concise and well-processed responses -- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses -- **Grok-4-Councilor**: Creative and innovative responses +| Council Member | Description | +|---------------------------------|------------------------------------------| +| **GPT-5.1-Councilor** | Analytical and comprehensive responses | +| **Gemini-3-Pro-Councilor** | Concise and well-processed responses | +| **Claude-Sonnet-4.5-Councilor** | Thoughtful and balanced responses | +| **Grok-4-Councilor** | Creative and innovative responses | The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis. 
@@ -137,17 +139,19 @@ Executes the full LLM Council workflow: parallel responses, anonymization, peer The return value depends on the `output_type` parameter set during initialization: -- **`"dict"`** (default): Returns conversation as a dictionary/list of message dictionaries -- **`"list"`**: Returns conversation as a list of formatted strings (`"role: content"`) -- **`"string"`** or **`"str"`**: Returns conversation as a formatted string -- **`"final"`** or **`"last"`**: Returns only the content of the final message (Chairman's response) -- **`"json"`**: Returns conversation as a JSON string -- **`"yaml"`**: Returns conversation as a YAML string -- **`"xml"`**: Returns conversation as an XML string -- **`"dict-all-except-first"`**: Returns all messages except the first as a dictionary -- **`"str-all-except-first"`**: Returns all messages except the first as a string -- **`"dict-final"`**: Returns the final message as a dictionary -- **`"list-final"`**: Returns the final message as a list +| `output_type` value | Description | +|---------------------------------|---------------------------------------------------------------------| +| **`"dict"`** (default) | Returns conversation as a dictionary/list of message dictionaries | +| **`"list"`** | Returns conversation as a list of formatted strings (`"role: content"`) | +| **`"string"`** or **`"str"`** | Returns conversation as a formatted string | +| **`"final"`** or **`"last"`** | Returns only the content of the final message (Chairman's response) | +| **`"json"`** | Returns conversation as a JSON string | +| **`"yaml"`** | Returns conversation as a YAML string | +| **`"xml"`** | Returns conversation as an XML string | +| **`"dict-all-except-first"`** | Returns all messages except the first as a dictionary | +| **`"str-all-except-first"`** | Returns all messages except the first as a string | +| **`"dict-final"`** | Returns the final message as a dictionary | +| **`"list-final"`** | Returns the final message as a 
list | #### Conversation Tracking @@ -354,11 +358,14 @@ The LLM Council is ideal for scenarios requiring: ### Common Applications -- **Medical Diagnosis**: Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations -- **Financial Analysis**: Different financial experts analyze investments and rank each other's assessments -- **Legal Analysis**: Multiple legal perspectives evaluate compliance and risk -- **Business Strategy**: Diverse strategic viewpoints are synthesized into comprehensive plans -- **Research Analysis**: Multiple research perspectives are combined for thorough analysis +| Use Case | Description | +|-----------------------|--------------------------------------------------------------------------------------------------| +| **Medical Diagnosis** | Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations | +| **Financial Analysis**| Different financial experts analyze investments and rank each other's assessments | +| **Legal Analysis** | Multiple legal perspectives evaluate compliance and risk | +| **Business Strategy** | Diverse strategic viewpoints are synthesized into comprehensive plans | +| **Research Analysis** | Multiple research perspectives are combined for thorough analysis | + ## Examples @@ -508,12 +515,14 @@ yaml_output = conversation.return_messages_as_dictionary() ## Performance Considerations -- **Parallel Execution**: Both response generation and evaluation phases run in parallel for efficiency -- **Anonymization**: Responses are anonymized to prevent bias in evaluation -- **Model Selection**: Different models can be used for different roles based on their strengths -- **Verbose Mode**: Can be disabled for production use to reduce output -- **Conversation Management**: Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files -- **Output Formatting**: Choose lightweight output formats (e.g., "final") for 
production to reduce memory usage +| Feature | Description | +|---------------------------|----------------------------------------------------------------------------------------------------------------| +| **Parallel Execution** | Both response generation and evaluation phases run in parallel for efficiency | +| **Anonymization** | Responses are anonymized to prevent bias in evaluation | +| **Model Selection** | Different models can be used for different roles based on their strengths | +| **Verbose Mode** | Can be disabled for production use to reduce output | +| **Conversation Management** | Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files | +| **Output Formatting** | Choose lightweight output formats (e.g., "final") for production to reduce memory usage | ## Related Documentation From f73b4f6b0711a7e90b96c414fd4fb7dd2a2cb619 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 24 Nov 2025 00:38:30 -0800 Subject: [PATCH 36/42] Integrate LLMCouncil into SwarmRouter --- docs/swarms/structs/swarm_router.md | 26 ++++++++++++++++++++++++++ swarms/structs/llm_council.py | 12 ++++++++++++ swarms/structs/swarm_router.py | 17 ++++++++++++++++- 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/docs/swarms/structs/swarm_router.md b/docs/swarms/structs/swarm_router.md index 8ccf1203..44bd1c8b 100644 --- a/docs/swarms/structs/swarm_router.md +++ b/docs/swarms/structs/swarm_router.md @@ -42,6 +42,7 @@ Main class for routing tasks to different swarm types. 
| `verbose` | bool | Flag to enable/disable verbose logging (default: False) | | `worker_tools` | List[Callable] | List of tools available to worker agents | | `aggregation_strategy` | str | Aggregation strategy for HeavySwarm (default: "synthesis") | +| `chairman_model` | str | Model name for the Chairman in LLMCouncil (default: "gpt-5.1") | ### Methods @@ -123,6 +124,7 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap | `InteractiveGroupChat` | Interactive group chat with user participation | | `HeavySwarm` | Heavy swarm architecture with question and worker agents | | `BatchedGridWorkflow` | Batched grid workflow for parallel task processing | +| `LLMCouncil` | Council of specialized LLM agents with peer review and synthesis | | `auto` | Automatically selects best swarm type via embedding search | ## Basic Usage @@ -456,6 +458,30 @@ result = batched_grid_router.run(tasks=["Task 1", "Task 2", "Task 3"]) BatchedGridWorkflow is designed for efficiently processing multiple tasks in parallel batches, optimizing resource utilization. +### LLMCouncil + +Use Case: Collaborative analysis with multiple specialized LLM agents that evaluate each other's responses and synthesize a final answer. + +```python +llm_council_router = SwarmRouter( + name="LLMCouncil", + description="Collaborative council of LLM agents with peer review", + swarm_type="LLMCouncil", + chairman_model="gpt-5.1", # Model for the Chairman agent + output_type="dict", # Output format: "dict", "list", "string", "json", "yaml", "final", etc. + verbose=True # Show progress and intermediate results +) + +result = llm_council_router.run("What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?") +``` + +LLMCouncil creates a council of specialized agents (GPT-5.1, Gemini, Claude, Grok by default) that: +1. Each independently responds to the query +2. Evaluates and ranks each other's anonymized responses +3. 
A Chairman synthesizes all responses and evaluations into a final comprehensive answer + +The council automatically tracks all messages in a conversation object and supports flexible output formats. Note: LLMCouncil uses default council members and doesn't require the `agents` parameter. + ## Advanced Features ### Processing Documents diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index fa2a6ab5..a303fa12 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -502,3 +502,15 @@ class LLMCouncil: return history_output_formatter( conversation=self.conversation, type=self.output_type ) + + def batched_run(self, tasks: List[str]): + """ + Run the LLM Council workflow for a batch of tasks. + + Args: + tasks: List of tasks to process + + Returns: + List of formatted outputs based on output_type + """ + return [self.run(task) for task in tasks] \ No newline at end of file diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 92903f57..dd13ee08 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -37,6 +37,7 @@ from swarms.telemetry.log_executions import log_execution from swarms.utils.generate_keys import generate_api_key from swarms.utils.loguru_logger import initialize_logger from swarms.utils.output_types import OutputType +from swarms.structs.llm_council import LLMCouncil logger = initialize_logger(log_folder="swarm_router") @@ -56,6 +57,7 @@ SwarmType = Literal[ "InteractiveGroupChat", "HeavySwarm", "BatchedGridWorkflow", + "LLMCouncil", ] @@ -210,6 +212,7 @@ class SwarmRouter: verbose: bool = False, worker_tools: List[Callable] = None, aggregation_strategy: str = "synthesis", + chairman_model: str = "gpt-5.1", *args, **kwargs, ): @@ -252,7 +255,8 @@ class SwarmRouter: self.heavy_swarm_swarm_show_output = ( heavy_swarm_swarm_show_output ) - + self.chairman_model = chairman_model + # Initialize swarm factory for O(1) lookup performance self._swarm_factory 
= self._initialize_swarm_factory() self._swarm_cache = {} # Cache for created swarms @@ -425,6 +429,7 @@ class SwarmRouter: "SequentialWorkflow": self._create_sequential_workflow, "ConcurrentWorkflow": self._create_concurrent_workflow, "BatchedGridWorkflow": self._create_batched_grid_workflow, + "LLMCouncil": self._create_llm_council, } def _create_heavy_swarm(self, *args, **kwargs): @@ -441,6 +446,16 @@ class SwarmRouter: aggregation_strategy=self.aggregation_strategy, show_dashboard=False, ) + + def _create_llm_council(self, *args, **kwargs): + """Factory function for LLMCouncil.""" + return LLMCouncil( + name=self.name, + description=self.description, + output_type=self.output_type, + verbose=self.verbose, + chairman_model=self.chairman_model, + ) def _create_agent_rearrange(self, *args, **kwargs): """Factory function for AgentRearrange.""" From b16a0263ab5abd20036a7c007bc5dd3daa2105de Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 24 Nov 2025 01:09:23 -0800 Subject: [PATCH 37/42] [CLI][Add LLMCouncil + Heavy Swarm] [Update documentation] --- docs/swarms/cli/cli_reference.md | 288 +++++++++++++++++- llm_council_example.py | 14 +- swarms/cli/main.py | 494 ++++++++++++++++++++++++++++++- swarms/structs/llm_council.py | 18 +- swarms/structs/swarm_router.py | 4 +- 5 files changed, 789 insertions(+), 29 deletions(-) diff --git a/docs/swarms/cli/cli_reference.md b/docs/swarms/cli/cli_reference.md index 7c1ad2e5..b96e8adc 100644 --- a/docs/swarms/cli/cli_reference.md +++ b/docs/swarms/cli/cli_reference.md @@ -5,20 +5,28 @@ The Swarms CLI is a comprehensive command-line interface for managing and execut ## Table of Contents - [Installation](#installation) - - [Basic Usage](#basic-usage) - - [Commands Reference](#commands-reference) - - [Global Arguments](#global-arguments) - - [Command-Specific Arguments](#command-specific-arguments) - + - [run-agents Command](#run-agents-command) + - [load-markdown Command](#load-markdown-command) + - [agent 
Command](#agent-command) + - [autoswarm Command](#autoswarm-command) + - [setup-check Command](#setup-check-command) + - [llm-council Command](#llm-council-command) + - [heavy-swarm Command](#heavy-swarm-command) + - [features Command](#features-command) - [Error Handling](#error-handling) - - [Examples](#examples) - - [Configuration](#configuration) +- [Advanced Features](#advanced-features) +- [Troubleshooting](#troubleshooting) +- [Integration](#integration) +- [Performance Considerations](#performance-considerations) +- [Security](#security) +- [Command Quick Reference](#command-quick-reference) +- [Support](#support) ## Installation @@ -43,6 +51,7 @@ swarms [options] |---------|-------------|-------------------| | `onboarding` | Start interactive onboarding process | None | | `help` | Display help message | None | +| `features` | Display all available features and actions in a comprehensive table | None | | `get-api-key` | Open API key portal in browser | None | | `check-login` | Verify login status and initialize cache | None | | `run-agents` | Execute agents from YAML configuration | `--yaml-file` | @@ -52,6 +61,8 @@ swarms [options] | `book-call` | Schedule strategy session | None | | `autoswarm` | Generate and execute autonomous swarm | `--task`, `--model` | | `setup-check` | Run comprehensive environment setup check | None | +| `llm-council` | Run LLM Council with multiple agents collaborating on a task | `--task` | +| `heavy-swarm` | Run HeavySwarm with specialized agents for complex task analysis | `--task` | ## Global Arguments @@ -221,6 +232,148 @@ swarms setup-check --verbose β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ ``` +### `llm-council` Command + +Run the LLM Council with multiple specialized agents that collaborate, evaluate, and synthesize responses. 
+ +The LLM Council follows a structured workflow: +1. **Independent Responses**: Each council member (GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, Grok-4) independently responds to the query +2. **Peer Review**: All members review and rank each other's anonymized responses +3. **Synthesis**: A Chairman agent synthesizes all responses and rankings into a final comprehensive answer + +```bash +swarms llm-council [options] +``` + +#### Required Arguments + +| Argument | Type | Description | +|----------|------|-------------| +| `--task` | `str` | The query or question for the LLM Council to process | + +#### Optional Arguments + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--verbose` | `bool` | `True` | Enable verbose output showing progress and intermediate results | + +**Example:** +```bash +# Basic usage +swarms llm-council --task "What are the best energy ETFs right now?" + +# With verbose output +swarms llm-council --task "What is the best approach to solve this problem?" --verbose +``` + +**How It Works:** + +The LLM Council creates a collaborative environment where: +- **Default Council Members**: GPT-5.1 (analytical), Gemini 3 Pro (concise), Claude Sonnet 4.5 (balanced), Grok-4 (creative) +- **Anonymized Evaluation**: Responses are anonymized before evaluation to ensure honest ranking +- **Cross-Model Evaluation**: Each model evaluates all responses, often selecting other models' responses as superior +- **Final Synthesis**: The Chairman (GPT-5.1 by default) synthesizes the best elements from all responses + +**Use Cases:** +- Complex problem-solving requiring multiple perspectives +- Research questions needing comprehensive analysis +- Decision-making scenarios requiring thorough evaluation +- Content generation with quality assurance + +### `heavy-swarm` Command + +Run HeavySwarm with specialized agents for complex task analysis and decomposition. + +HeavySwarm follows a structured workflow: +1. 
**Task Decomposition**: Breaks down tasks into specialized questions +2. **Parallel Execution**: Executes specialized agents in parallel +3. **Result Synthesis**: Integrates and synthesizes results +4. **Comprehensive Reporting**: Generates detailed final reports +5. **Iterative Refinement**: Optional multi-loop execution for iterative improvement + +```bash +swarms heavy-swarm [options] +``` + +#### Required Arguments + +| Argument | Type | Description | +|----------|------|-------------| +| `--task` | `str` | The task for HeavySwarm to analyze and process | + +#### Optional Arguments + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--loops-per-agent` | `int` | `1` | Number of execution loops each agent should perform | +| `--question-agent-model-name` | `str` | `"gpt-4o-mini"` | Model name for the question generation agent | +| `--worker-model-name` | `str` | `"gpt-4o-mini"` | Model name for specialized worker agents | +| `--random-loops-per-agent` | `bool` | `False` | Enable random number of loops per agent (1-10 range) | +| `--verbose` | `bool` | `False` | Enable verbose output showing detailed progress | + +**Example:** +```bash +# Basic usage +swarms heavy-swarm --task "Analyze the current market trends for renewable energy" + +# With custom configuration +swarms heavy-swarm \ + --task "Research the best investment strategies for 2024" \ + --loops-per-agent 3 \ + --question-agent-model-name "gpt-4" \ + --worker-model-name "gpt-4" \ + --random-loops-per-agent \ + --verbose +``` + +**Specialized Agent Roles:** + +HeavySwarm includes specialized agents for different aspects of analysis: +- **Research Agent**: Fast, trustworthy, and reproducible research +- **Analysis Agent**: Statistical analysis and validated insights +- **Writing Agent**: Clear, structured documentation +- **Question Agent**: Task decomposition and question generation + +**Use Cases:** +- Complex research tasks requiring multiple perspectives +- 
Market analysis and financial research +- Technical analysis and evaluation +- Comprehensive report generation +- Multi-faceted problem solving + +### `features` Command + +Display all available CLI features and actions in a comprehensive, formatted table. + +This command provides a quick reference to all available features, their categories, descriptions, command syntax, and key parameters. + +```bash +swarms features +``` + +**No arguments required.** + +**Example:** +```bash +swarms features +``` + +**Output Includes:** +- **Main Features Table**: Complete list of all features with: + - Feature name + - Category (Setup, Auth, Execution, Creation, etc.) + - Description + - Command syntax + - Key parameters +- **Category Summary**: Overview of features grouped by category with counts +- **Usage Tips**: Quick tips for using the CLI effectively + +**Use Cases:** +- Quick reference when exploring CLI capabilities +- Discovering available features +- Understanding command syntax and parameters +- Learning about feature categories + ## Error Handling The CLI provides comprehensive error handling with formatted error messages: @@ -289,6 +442,34 @@ swarms autoswarm \ --model "gpt-4" ``` +### LLM Council Collaboration + +```bash +# Run LLM Council for collaborative problem solving +swarms llm-council \ + --task "What are the best strategies for reducing carbon emissions in manufacturing?" 
\ + --verbose +``` + +### HeavySwarm Complex Analysis + +```bash +# Run HeavySwarm for comprehensive task analysis +swarms heavy-swarm \ + --task "Analyze the impact of AI on the job market in 2024" \ + --loops-per-agent 2 \ + --question-agent-model-name "gpt-4" \ + --worker-model-name "gpt-4" \ + --verbose +``` + +### Viewing All Features + +```bash +# Display all available features +swarms features +``` + ## Configuration ### YAML Configuration Format @@ -386,6 +567,54 @@ Guided setup process including: - Usage examples +### Multi-Agent Collaboration + +The CLI supports advanced multi-agent architectures: + +#### LLM Council + +Collaborative problem-solving with multiple specialized models: + +```bash +swarms llm-council --task "Your question here" +``` + +**Features:** +- Multiple model perspectives (GPT-5.1, Gemini, Claude, Grok) +- Anonymous peer review and ranking +- Synthesized final responses +- Cross-model evaluation + +#### HeavySwarm + +Complex task analysis with specialized agent roles: + +```bash +swarms heavy-swarm --task "Your complex task here" +``` + +**Features:** +- Task decomposition into specialized questions +- Parallel agent execution +- Result synthesis and integration +- Iterative refinement with multiple loops +- Specialized agent roles (Research, Analysis, Writing, Question) + +### Feature Discovery + +Quickly discover all available features: + +```bash +swarms features +``` + +Displays comprehensive tables showing: +- All available commands +- Feature categories +- Command syntax +- Key parameters +- Usage examples + ## Troubleshooting @@ -451,6 +680,8 @@ swarms run-agents --yaml-file agents2.yaml | Model Selection | Choose appropriate models for task complexity | | Context Length | Monitor and optimize input sizes | | Rate Limiting | Respect API provider limits | +| Multi-Agent Execution | LLM Council and HeavySwarm execute agents in parallel for efficiency | +| Loop Configuration | Adjust `--loops-per-agent` based on task complexity 
and time constraints | ## Security @@ -461,6 +692,48 @@ swarms run-agents --yaml-file agents2.yaml | Input Validation | CLI validates all inputs before execution | | Error Sanitization | Sensitive information is not exposed in errors | +## Command Quick Reference + +### Quick Start Commands + +```bash +# Environment setup +swarms setup-check --verbose +swarms onboarding + +# View all features +swarms features + +# Get help +swarms help +``` + +### Agent Commands + +```bash +# Create custom agent +swarms agent --name "Agent" --task "Task" --system-prompt "Prompt" + +# Run agents from YAML +swarms run-agents --yaml-file agents.yaml + +# Load from markdown +swarms load-markdown --markdown-path ./agents/ +``` + +### Multi-Agent Commands + +```bash +# LLM Council +swarms llm-council --task "Your question" + +# HeavySwarm +swarms heavy-swarm --task "Your complex task" --loops-per-agent 2 --verbose + +# Auto-generate swarm +swarms autoswarm --task "Task description" --model "gpt-4" +``` + ## Support For additional support: @@ -470,3 +743,4 @@ For additional support: | **Community** | [Discord](https://discord.gg/EamjgSaEQf) | | **Issues** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) | | **Strategy Sessions**| [Book a Call](https://cal.com/swarms/swarms-strategy-session) | +| **Documentation** | [Full Documentation](https://docs.swarms.world) | diff --git a/llm_council_example.py b/llm_council_example.py index 1cc415d0..8dc1334a 100644 --- a/llm_council_example.py +++ b/llm_council_example.py @@ -1,7 +1,7 @@ from swarms.structs.llm_council import LLMCouncil # Create the council -council = LLMCouncil(verbose=True) +council = LLMCouncil(verbose=True, output_type="final") # Example query query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" 
@@ -9,14 +9,4 @@ query = "What are the top five best energy stocks across nuclear, solar, gas, an # Run the council result = council.run(query) -# Print final response -print(result["final_response"]) - -# Optionally print evaluations -for name, evaluation in result["evaluations"].items(): - print(f"\n{name}:") - print( - evaluation[:500] + "..." - if len(evaluation) > 500 - else evaluation - ) +print(result) diff --git a/swarms/cli/main.py b/swarms/cli/main.py index e4458726..a6c6181a 100644 --- a/swarms/cli/main.py +++ b/swarms/cli/main.py @@ -22,6 +22,8 @@ from swarms.agents.create_agents_from_yaml import ( from swarms.structs.agent import Agent from swarms.structs.agent_loader import AgentLoader +from swarms.structs.llm_council import LLMCouncil +from swarms.structs.heavy_swarm import HeavySwarm from swarms.utils.formatter import formatter load_dotenv() @@ -625,6 +627,18 @@ def create_command_table() -> Table: "setup-check", "Run a comprehensive environment setup check", ), + ( + "llm-council", + "Run the LLM Council with multiple agents collaborating on a task", + ), + ( + "heavy-swarm", + "Run HeavySwarm with specialized agents for complex task analysis", + ), + ( + "features", + "Display all available features and actions in a comprehensive table", + ), ] for cmd, desc in commands: @@ -639,7 +653,7 @@ def create_detailed_command_table() -> Table: show_header=True, header_style=f"bold {COLORS['primary']}", border_style=COLORS["secondary"], - title="πŸš€ Swarms CLI - Complete Command Reference", + title="πŸš€Swarms CLI - Complete Command Reference", title_style=f"bold {COLORS['primary']}", padding=(0, 1), show_lines=True, @@ -744,6 +758,27 @@ def create_detailed_command_table() -> Table: "usage": "swarms setup-check [--verbose]", "args": "--verbose", }, + { + "cmd": "llm-council", + "category": "Collaboration", + "desc": "Run LLM Council with multiple agents", + "usage": "swarms llm-council --task 'Your question here' [--verbose]", + "args": "--task, 
--verbose", + }, + { + "cmd": "heavy-swarm", + "category": "Execution", + "desc": "Run HeavySwarm with specialized agents", + "usage": "swarms heavy-swarm --task 'Your task here' [--loops-per-agent 1] [--question-agent-model-name gpt-4o-mini] [--worker-model-name gpt-4o-mini] [--random-loops-per-agent] [--verbose]", + "args": "--task, --loops-per-agent, --question-agent-model-name, --worker-model-name, --random-loops-per-agent, --verbose", + }, + { + "cmd": "features", + "category": "Info", + "desc": "Display all available features and actions", + "usage": "swarms features", + "args": "None", + }, ] for cmd_info in commands: @@ -758,6 +793,223 @@ def create_detailed_command_table() -> Table: return table +def show_features(): + """ + Display all available CLI features and actions in a comprehensive table. + """ + console.print( + "\n[bold]πŸš€ Swarms CLI - All Available Features[/bold]\n", + style=COLORS["primary"], + ) + + # Create main features table + features_table = Table( + show_header=True, + header_style=f"bold {COLORS['primary']}", + border_style=COLORS["secondary"], + title="✨ Complete Feature Reference", + title_style=f"bold {COLORS['primary']}", + padding=(0, 1), + show_lines=True, + expand=True, + ) + + # Add columns + features_table.add_column( + "Feature", + style=f"bold {COLORS['accent']}", + width=20, + no_wrap=True, + ) + features_table.add_column( + "Category", + style="bold cyan", + width=15, + justify="center", + ) + features_table.add_column( + "Description", + style="white", + width=50, + no_wrap=False, + ) + features_table.add_column( + "Command", + style="dim yellow", + width=35, + no_wrap=False, + ) + features_table.add_column( + "Key Parameters", + style="dim magenta", + width=30, + no_wrap=False, + ) + + # Define all features + features = [ + { + "feature": "Environment Setup", + "category": "Setup", + "desc": "Check and verify your Swarms environment configuration", + "command": "swarms setup-check [--verbose]", + "params": "--verbose", 
+ }, + { + "feature": "Onboarding", + "category": "Setup", + "desc": "Run environment setup check (alias for setup-check)", + "command": "swarms onboarding [--verbose]", + "params": "--verbose", + }, + { + "feature": "API Key Management", + "category": "Setup", + "desc": "Retrieve API keys from the Swarms platform", + "command": "swarms get-api-key", + "params": "None", + }, + { + "feature": "Authentication", + "category": "Auth", + "desc": "Verify login status and initialize authentication cache", + "command": "swarms check-login", + "params": "None", + }, + { + "feature": "YAML Agent Execution", + "category": "Execution", + "desc": "Execute agents from YAML configuration files", + "command": "swarms run-agents --yaml-file agents.yaml", + "params": "--yaml-file", + }, + { + "feature": "Markdown Agent Loading", + "category": "Loading", + "desc": "Load agents from markdown files with YAML frontmatter", + "command": "swarms load-markdown --markdown-path ./agents/", + "params": "--markdown-path, --concurrent", + }, + { + "feature": "Custom Agent Creation", + "category": "Creation", + "desc": "Create and run a custom agent with specified parameters", + "command": "swarms agent --name 'Agent' --task 'Task' --system-prompt 'Prompt'", + "params": "--name, --task, --system-prompt, --model-name, --temperature, --max-loops, --verbose", + }, + { + "feature": "Auto Swarm Generation", + "category": "AI Generation", + "desc": "Automatically generate and execute an autonomous swarm configuration", + "command": "swarms autoswarm --task 'analyze data' --model gpt-4", + "params": "--task, --model", + }, + { + "feature": "LLM Council", + "category": "Collaboration", + "desc": "Run LLM Council with multiple agents collaborating and evaluating responses", + "command": "swarms llm-council --task 'Your question' [--verbose]", + "params": "--task, --verbose", + }, + { + "feature": "HeavySwarm", + "category": "Execution", + "desc": "Run HeavySwarm with specialized agents for complex task 
analysis", + "command": "swarms heavy-swarm --task 'Your task' [options]", + "params": "--task, --loops-per-agent, --question-agent-model-name, --worker-model-name, --random-loops-per-agent, --verbose", + }, + { + "feature": "Package Upgrade", + "category": "Maintenance", + "desc": "Update Swarms to the latest version", + "command": "swarms auto-upgrade", + "params": "None", + }, + { + "feature": "Support Booking", + "category": "Support", + "desc": "Schedule a strategy session with the Swarms team", + "command": "swarms book-call", + "params": "None", + }, + { + "feature": "Help Documentation", + "category": "Info", + "desc": "Display comprehensive help message with all commands", + "command": "swarms help", + "params": "None", + }, + { + "feature": "Features List", + "category": "Info", + "desc": "Display all available features and actions in a table", + "command": "swarms features", + "params": "None", + }, + ] + + # Add rows to table + for feat in features: + features_table.add_row( + feat["feature"], + feat["category"], + feat["desc"], + feat["command"], + feat["params"], + ) + + console.print(features_table) + + # Add category summary + console.print("\n[bold cyan]πŸ“Š Feature Categories:[/bold cyan]\n") + + category_table = Table( + show_header=True, + header_style=f"bold {COLORS['primary']}", + border_style=COLORS["secondary"], + padding=(0, 2), + ) + + category_table.add_column("Category", style="bold cyan", width=20) + category_table.add_column("Count", style="bold white", justify="center", width=10) + category_table.add_column("Features", style="dim white", width=60) + + # Count features by category + categories = {} + for feat in features: + cat = feat["category"] + if cat not in categories: + categories[cat] = [] + categories[cat].append(feat["feature"]) + + for category, feature_list in sorted(categories.items()): + category_table.add_row( + category, + str(len(feature_list)), + ", ".join(feature_list), + ) + + console.print(category_table) + + # Add 
usage tips + tips_panel = Panel( + "[bold cyan]πŸ’‘ Quick Tips:[/bold cyan]\n" + "β€’ Use [yellow]swarms features[/yellow] to see this table anytime\n" + "β€’ Use [yellow]swarms help[/yellow] for detailed command documentation\n" + "β€’ Use [yellow]swarms setup-check --verbose[/yellow] for detailed diagnostics\n" + "β€’ Most commands support [yellow]--verbose[/yellow] for detailed output\n" + "β€’ Use [yellow]swarms --help[/yellow] for command-specific help", + title="πŸ“š Usage Tips", + border_style=COLORS["success"], + padding=(1, 2), + ) + console.print(tips_panel) + + console.print( + "\n[dim]For more information, visit: https://docs.swarms.world[/dim]" + ) + + def show_help(): """Display a beautifully formatted help message with comprehensive command reference.""" console.print( @@ -771,7 +1023,10 @@ def show_help(): "β€’ [yellow]swarms onboarding[/yellow] - Environment setup check\n" "β€’ [yellow]swarms setup-check[/yellow] - Check your environment\n" "β€’ [yellow]swarms agent --name 'MyAgent' --task 'Hello World'[/yellow] - Create agent\n" - "β€’ [yellow]swarms autoswarm --task 'analyze data' --model gpt-4[/yellow] - Auto-generate swarm", + "β€’ [yellow]swarms autoswarm --task 'analyze data' --model gpt-4[/yellow] - Auto-generate swarm\n" + "β€’ [yellow]swarms llm-council --task 'Your question'[/yellow] - Run LLM Council\n" + "β€’ [yellow]swarms heavy-swarm --task 'Your task'[/yellow] - Run HeavySwarm\n" + "β€’ [yellow]swarms features[/yellow] - View all available features", title="⚑ Quick Usage Guide", border_style=COLORS["secondary"], padding=(1, 2), @@ -1028,6 +1283,189 @@ def load_markdown_agents( return [] +def run_heavy_swarm( + task: str, + loops_per_agent: int = 1, + question_agent_model_name: str = "gpt-4o-mini", + worker_model_name: str = "gpt-4o-mini", + random_loops_per_agent: bool = False, + verbose: bool = False, +): + """ + Run the HeavySwarm with a given task. 
+ + Args: + task: The task/query for the HeavySwarm to process + loops_per_agent: Number of execution loops each agent should perform + question_agent_model_name: Model name for question generation + worker_model_name: Model name for specialized worker agents + random_loops_per_agent: Enable random number of loops per agent (1-10 range) + verbose: Whether to show verbose output + """ + try: + console.print( + "[yellow]πŸš€ Initializing HeavySwarm...[/yellow]" + ) + + # Create progress display + progress = Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + console=console, + ) + + with progress: + # Add initial task + init_task = progress.add_task( + "Initializing swarm...", total=None + ) + + # Create HeavySwarm + progress.update( + init_task, + description="Creating HeavySwarm with specialized agents...", + ) + swarm = HeavySwarm( + loops_per_agent=loops_per_agent, + question_agent_model_name=question_agent_model_name, + worker_model_name=worker_model_name, + random_loops_per_agent=random_loops_per_agent, + verbose=verbose, + ) + + # Update progress + progress.update( + init_task, + description="Swarm initialized! Processing task...", + ) + + # Run the swarm + result = swarm.run(task=task) + + # Update progress on completion + progress.update( + init_task, + description="Task completed!", + completed=True, + ) + + # Display results + if result: + console.print( + "\n[bold green]βœ“ HeavySwarm completed successfully![/bold green]" + ) + + # Display result in a panel + result_panel = Panel( + str(result), + title="HeavySwarm Final Response", + border_style="green", + padding=(1, 2), + ) + console.print(result_panel) + + return result + else: + console.print( + "[yellow]⚠ HeavySwarm completed but returned no results.[/yellow]" + ) + return None + + except Exception as e: + show_error( + "HeavySwarm Error", + f"Failed to run HeavySwarm: {str(e)}\n\n" + "Please check:\n" + "1. Your API keys are set correctly\n" + "2. 
You have network connectivity\n" + "3. The task is properly formatted", + ) + return None + + +def run_llm_council(task: str, verbose: bool = True): + """ + Run the LLM Council with a given task. + + Args: + task: The task/query for the LLM Council to process + verbose: Whether to show verbose output + """ + try: + console.print( + "[yellow]πŸ›οΈ Initializing LLM Council...[/yellow]" + ) + + # Create progress display + progress = Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + console=console, + ) + + with progress: + # Add initial task + init_task = progress.add_task( + "Initializing council...", total=None + ) + + # Create LLM Council + progress.update( + init_task, + description="Creating LLM Council with default members...", + ) + council = LLMCouncil(verbose=verbose) + + # Update progress + progress.update( + init_task, + description="Council initialized! Processing task...", + ) + + # Run the council + result = council.run(query=task) + + # Update progress on completion + progress.update( + init_task, + description="Task completed!", + completed=True, + ) + + # Display results + if result: + console.print( + "\n[bold green]βœ“ LLM Council completed successfully![/bold green]" + ) + + # Display result in a panel + result_panel = Panel( + str(result), + title="LLM Council Final Response", + border_style="green", + padding=(1, 2), + ) + console.print(result_panel) + + return result + else: + console.print( + "[yellow]⚠ LLM Council completed but returned no results.[/yellow]" + ) + return None + + except Exception as e: + show_error( + "LLM Council Error", + f"Failed to run LLM Council: {str(e)}\n\n" + "Please check:\n" + "1. Your API keys are set correctly\n" + "2. You have network connectivity\n" + "3. 
The task is properly formatted", + ) + return None + + def create_swarm_agent( name: str, description: str, @@ -1158,6 +1596,9 @@ def main(): "book-call", "autoswarm", "setup-check", + "llm-council", + "heavy-swarm", + "features", ], help="Command to execute", ) @@ -1285,6 +1726,30 @@ def main(): type=str, help="MCP URL for the agent", ) + # HeavySwarm specific arguments + parser.add_argument( + "--loops-per-agent", + type=int, + default=1, + help="Number of execution loops each agent should perform (default: 1)", + ) + parser.add_argument( + "--question-agent-model-name", + type=str, + default="gpt-4o-mini", + help="Model name for question generation agent (default: gpt-4o-mini)", + ) + parser.add_argument( + "--worker-model-name", + type=str, + default="gpt-4o-mini", + help="Model name for specialized worker agents (default: gpt-4o-mini)", + ) + parser.add_argument( + "--random-loops-per-agent", + action="store_true", + help="Enable random number of loops per agent (1-10 range)", + ) args = parser.parse_args() @@ -1297,6 +1762,8 @@ def main(): run_setup_check(verbose=args.verbose) elif args.command == "help": show_help() + elif args.command == "features": + show_features() elif args.command == "get-api-key": get_api_key() elif args.command == "check-login": @@ -1517,6 +1984,29 @@ def main(): run_autoswarm(args.task, args.model) elif args.command == "setup-check": run_setup_check(verbose=args.verbose) + elif args.command == "llm-council": + if not args.task: + show_error( + "Missing required argument: --task", + "Example usage: swarms llm-council --task 'What is the best approach to solve this problem?'", + ) + exit(1) + run_llm_council(task=args.task, verbose=args.verbose) + elif args.command == "heavy-swarm": + if not args.task: + show_error( + "Missing required argument: --task", + "Example usage: swarms heavy-swarm --task 'Analyze the current market trends'", + ) + exit(1) + run_heavy_swarm( + task=args.task, + loops_per_agent=args.loops_per_agent, + 
question_agent_model_name=args.question_agent_model_name, + worker_model_name=args.worker_model_name, + random_loops_per_agent=args.random_loops_per_agent, + verbose=args.verbose, + ) except Exception as e: console.print( f"[{COLORS['error']}]Error: {str(e)}[/{COLORS['error']}]" diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index a303fa12..c732f058 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -17,10 +17,14 @@ from swarms.structs.multi_agent_exec import ( run_agents_concurrently, batched_grid_agent_execution, ) -from swarms.utils.history_output_formatter import HistoryOutputType, history_output_formatter +from swarms.utils.history_output_formatter import ( + HistoryOutputType, + history_output_formatter, +) from swarms.structs.conversation import Conversation from swarms.structs.swarm_id import swarm_id + def get_gpt_councilor_prompt() -> str: """ Get system prompt for GPT-5.1 councilor. @@ -272,7 +276,7 @@ class LLMCouncil: council_members: Optional[List[Agent]] = None, chairman_model: str = "gpt-5.1", verbose: bool = True, - output_type: HistoryOutputType = "dict", + output_type: HistoryOutputType = "dict-all-except-first", ): """ Initialize the LLM Council. 
@@ -306,8 +310,10 @@ class LLMCouncil: verbose=verbose, temperature=0.7, ) - - self.conversation = Conversation(name=f"[LLM Council] [Conversation][{name}]") + + self.conversation = Conversation( + name=f"[LLM Council] [Conversation][{name}]" + ) if self.verbose: print( @@ -365,7 +371,7 @@ class LLMCouncil: agent_name="Grok-4-Councilor", agent_description="Creative and innovative AI councilor specializing in unique perspectives and creative solutions", system_prompt=get_grok_councilor_prompt(), - model_name="x-ai/grok-4", # Using available model as proxy for Grok-4 + model_name="xai/grok-4-1-fast-reasoning", # Using available model as proxy for Grok-4 max_loops=1, verbose=False, temperature=0.8, @@ -513,4 +519,4 @@ class LLMCouncil: Returns: List of formatted outputs based on output_type """ - return [self.run(task) for task in tasks] \ No newline at end of file + return [self.run(task) for task in tasks] diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index dd13ee08..57a67a12 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -256,7 +256,7 @@ class SwarmRouter: heavy_swarm_swarm_show_output ) self.chairman_model = chairman_model - + # Initialize swarm factory for O(1) lookup performance self._swarm_factory = self._initialize_swarm_factory() self._swarm_cache = {} # Cache for created swarms @@ -446,7 +446,7 @@ class SwarmRouter: aggregation_strategy=self.aggregation_strategy, show_dashboard=False, ) - + def _create_llm_council(self, *args, **kwargs): """Factory function for LLMCouncil.""" return LLMCouncil( From f2dc945183ef5b92af7ffabe75fdb32f1fd4c7d8 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 24 Nov 2025 01:16:05 -0800 Subject: [PATCH 38/42] [CLI][Examples][CLI Docs] --- examples/README.md | 32 ++++- examples/cli/01_setup_check.sh | 7 + examples/cli/02_onboarding.sh | 7 + examples/cli/03_get_api_key.sh | 7 + examples/cli/04_check_login.sh | 7 + examples/cli/05_create_agent.sh | 12 ++ 
examples/cli/06_run_agents_yaml.sh | 7 + examples/cli/07_load_markdown.sh | 7 + examples/cli/08_llm_council.sh | 7 + examples/cli/09_heavy_swarm.sh | 7 + examples/cli/10_autoswarm.sh | 7 + examples/cli/11_features.sh | 7 + examples/cli/12_help.sh | 7 + examples/cli/13_auto_upgrade.sh | 7 + examples/cli/14_book_call.sh | 7 + examples/cli/README.md | 197 +++++++++++++++++++++++++++++ examples/cli/run_all_examples.sh | 11 ++ 17 files changed, 336 insertions(+), 7 deletions(-) create mode 100644 examples/cli/01_setup_check.sh create mode 100644 examples/cli/02_onboarding.sh create mode 100644 examples/cli/03_get_api_key.sh create mode 100644 examples/cli/04_check_login.sh create mode 100644 examples/cli/05_create_agent.sh create mode 100644 examples/cli/06_run_agents_yaml.sh create mode 100644 examples/cli/07_load_markdown.sh create mode 100644 examples/cli/08_llm_council.sh create mode 100644 examples/cli/09_heavy_swarm.sh create mode 100644 examples/cli/10_autoswarm.sh create mode 100644 examples/cli/11_features.sh create mode 100644 examples/cli/12_help.sh create mode 100644 examples/cli/13_auto_upgrade.sh create mode 100644 examples/cli/14_book_call.sh create mode 100644 examples/cli/README.md create mode 100644 examples/cli/run_all_examples.sh diff --git a/examples/README.md b/examples/README.md index 34259fd4..ac908c7a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -79,16 +79,26 @@ This directory contains comprehensive examples demonstrating various capabilitie - [README.md](ui/README.md) - UI examples documentation - [chat.py](ui/chat.py) - Chat interface example +### Command Line Interface + +- **[cli/](cli/)** - CLI command examples demonstrating all available Swarms CLI features including setup, agent management, multi-agent architectures, and utilities. 
+ - [README.md](cli/README.md) - CLI examples documentation + - [01_setup_check.sh](cli/01_setup_check.sh) - Environment setup verification + - [05_create_agent.sh](cli/05_create_agent.sh) - Create custom agents + - [08_llm_council.sh](cli/08_llm_council.sh) - LLM Council collaboration + - [09_heavy_swarm.sh](cli/09_heavy_swarm.sh) - HeavySwarm complex analysis + ## Quick Start 1. **New to Swarms?** Start with [single_agent/simple_agent.py](single_agent/simple_agent.py) for basic concepts -2. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py) -3. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py) -4. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery -5. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/) -6. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials -7. **Need RAG?** Try [rag/qdrant_rag_example.py](rag/qdrant_rag_example.py) -8. **Want reasoning agents?** Check out [reasoning_agents/example_o3.py](reasoning_agents/example_o3.py) +2. **Want to use the CLI?** Check out [cli/](cli/) for all CLI command examples +3. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py) +4. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py) +5. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery +6. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/) +7. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials +8. **Need RAG?** Try [rag/qdrant_rag_example.py](rag/qdrant_rag_example.py) +9. 
**Want reasoning agents?** Check out [reasoning_agents/example_o3.py](reasoning_agents/example_o3.py) ## Key Examples by Category @@ -122,6 +132,14 @@ This directory contains comprehensive examples demonstrating various capabilitie - [Azure](single_agent/llms/azure_agent.py) - Azure OpenAI - [Ollama](models/simple_example_ollama.py) - Local Ollama models +### CLI Examples + +- [Setup Check](cli/01_setup_check.sh) - Verify environment setup +- [Create Agent](cli/05_create_agent.sh) - Create custom agents via CLI +- [LLM Council](cli/08_llm_council.sh) - Run LLM Council collaboration +- [HeavySwarm](cli/09_heavy_swarm.sh) - Run HeavySwarm for complex tasks +- [All CLI Examples](cli/) - Complete CLI examples directory + ## Documentation Each subdirectory contains its own README.md file with detailed descriptions and links to all available examples. Click on any folder above to explore its specific examples and use cases. diff --git a/examples/cli/01_setup_check.sh b/examples/cli/01_setup_check.sh new file mode 100644 index 00000000..523c806a --- /dev/null +++ b/examples/cli/01_setup_check.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Setup Check Example +# Verify your Swarms environment setup + +swarms setup-check + diff --git a/examples/cli/02_onboarding.sh b/examples/cli/02_onboarding.sh new file mode 100644 index 00000000..973a5630 --- /dev/null +++ b/examples/cli/02_onboarding.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Onboarding Example +# Start the interactive onboarding process + +swarms onboarding + diff --git a/examples/cli/03_get_api_key.sh b/examples/cli/03_get_api_key.sh new file mode 100644 index 00000000..f9775413 --- /dev/null +++ b/examples/cli/03_get_api_key.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Get API Key Example +# Open API key portal in browser + +swarms get-api-key + diff --git a/examples/cli/04_check_login.sh b/examples/cli/04_check_login.sh new file mode 100644 index 00000000..41479137 --- /dev/null +++ 
b/examples/cli/04_check_login.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Check Login Example +# Verify authentication status + +swarms check-login + diff --git a/examples/cli/05_create_agent.sh b/examples/cli/05_create_agent.sh new file mode 100644 index 00000000..eb4ed597 --- /dev/null +++ b/examples/cli/05_create_agent.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Swarms CLI - Create Agent Example +# Create and run a custom agent + +swarms agent \ + --name "Research Agent" \ + --description "AI research specialist" \ + --system-prompt "You are an expert research agent." \ + --task "Analyze current trends in renewable energy" \ + --model-name "gpt-4o-mini" + diff --git a/examples/cli/06_run_agents_yaml.sh b/examples/cli/06_run_agents_yaml.sh new file mode 100644 index 00000000..1856c54f --- /dev/null +++ b/examples/cli/06_run_agents_yaml.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Run Agents from YAML Example +# Execute agents from YAML configuration file + +swarms run-agents --yaml-file agents.yaml + diff --git a/examples/cli/07_load_markdown.sh b/examples/cli/07_load_markdown.sh new file mode 100644 index 00000000..b1ba6e56 --- /dev/null +++ b/examples/cli/07_load_markdown.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Load Markdown Agents Example +# Load agents from markdown files + +swarms load-markdown --markdown-path ./agents/ + diff --git a/examples/cli/08_llm_council.sh b/examples/cli/08_llm_council.sh new file mode 100644 index 00000000..eb29b726 --- /dev/null +++ b/examples/cli/08_llm_council.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - LLM Council Example +# Run LLM Council for collaborative problem-solving + +swarms llm-council --task "What are the best energy ETFs to invest in right now?" 
+ diff --git a/examples/cli/09_heavy_swarm.sh b/examples/cli/09_heavy_swarm.sh new file mode 100644 index 00000000..6dfadc00 --- /dev/null +++ b/examples/cli/09_heavy_swarm.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - HeavySwarm Example +# Run HeavySwarm for complex task analysis + +swarms heavy-swarm --task "Analyze current market trends for renewable energy investments" + diff --git a/examples/cli/10_autoswarm.sh b/examples/cli/10_autoswarm.sh new file mode 100644 index 00000000..b94192f0 --- /dev/null +++ b/examples/cli/10_autoswarm.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Autoswarm Example +# Auto-generate swarm configuration + +swarms autoswarm --task "Analyze quarterly sales data" --model "gpt-4" + diff --git a/examples/cli/11_features.sh b/examples/cli/11_features.sh new file mode 100644 index 00000000..687200a4 --- /dev/null +++ b/examples/cli/11_features.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Features Example +# Display all available CLI features + +swarms features + diff --git a/examples/cli/12_help.sh b/examples/cli/12_help.sh new file mode 100644 index 00000000..09b6780c --- /dev/null +++ b/examples/cli/12_help.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Help Example +# Display comprehensive help documentation + +swarms help + diff --git a/examples/cli/13_auto_upgrade.sh b/examples/cli/13_auto_upgrade.sh new file mode 100644 index 00000000..6827f995 --- /dev/null +++ b/examples/cli/13_auto_upgrade.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Auto Upgrade Example +# Update Swarms to the latest version + +swarms auto-upgrade + diff --git a/examples/cli/14_book_call.sh b/examples/cli/14_book_call.sh new file mode 100644 index 00000000..e0108d9e --- /dev/null +++ b/examples/cli/14_book_call.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Book Call Example +# Schedule a strategy session + +swarms book-call + diff --git a/examples/cli/README.md b/examples/cli/README.md new file mode 100644 index 00000000..a002cd96 --- 
/dev/null +++ b/examples/cli/README.md @@ -0,0 +1,197 @@ +# Swarms CLI Examples + +This directory contains shell script examples demonstrating all available Swarms CLI commands and features. Each script is simple, focused, and demonstrates a single CLI command. + +## Quick Start + +All scripts are executable. Run them directly: + +```bash +chmod +x *.sh +./01_setup_check.sh +``` + +Or execute with bash: + +```bash +bash 01_setup_check.sh +``` + +## Available Examples + +### Setup & Configuration + +- **[01_setup_check.sh](examples/cli/01_setup_check.sh)** - Environment setup verification + ```bash + swarms setup-check + ``` + +- **[02_onboarding.sh](examples/cli/02_onboarding.sh)** - Interactive onboarding process + ```bash + swarms onboarding + ``` + +- **[03_get_api_key.sh](examples/cli/03_get_api_key.sh)** - Retrieve API keys + ```bash + swarms get-api-key + ``` + +- **[04_check_login.sh](examples/cli/04_check_login.sh)** - Verify authentication + ```bash + swarms check-login + ``` + +### Agent Management + +- **[05_create_agent.sh](examples/cli/05_create_agent.sh)** - Create and run custom agents + ```bash + swarms agent --name "Agent" --description "Description" --system-prompt "Prompt" --task "Task" + ``` + +- **[06_run_agents_yaml.sh](examples/cli/06_run_agents_yaml.sh)** - Execute agents from YAML + ```bash + swarms run-agents --yaml-file agents.yaml + ``` + +- **[07_load_markdown.sh](examples/cli/07_load_markdown.sh)** - Load agents from markdown files + ```bash + swarms load-markdown --markdown-path ./agents/ + ``` + +### Multi-Agent Architectures + +- **[08_llm_council.sh](examples/cli/08_llm_council.sh)** - Run LLM Council collaboration + ```bash + swarms llm-council --task "Your question here" + ``` + +- **[09_heavy_swarm.sh](examples/cli/09_heavy_swarm.sh)** - Run HeavySwarm for complex tasks + ```bash + swarms heavy-swarm --task "Your complex task here" + ``` + +- **[10_autoswarm.sh](examples/cli/10_autoswarm.sh)** - Auto-generate swarm 
configurations + ```bash + swarms autoswarm --task "Task description" --model "gpt-4" + ``` + +### Utilities + +- **[11_features.sh](examples/cli/11_features.sh)** - Display all available features + ```bash + swarms features + ``` + +- **[12_help.sh](examples/cli/12_help.sh)** - Display help documentation + ```bash + swarms help + ``` + +- **[13_auto_upgrade.sh](examples/cli/13_auto_upgrade.sh)** - Update Swarms package + ```bash + swarms auto-upgrade + ``` + +- **[14_book_call.sh](examples/cli/14_book_call.sh)** - Schedule strategy session + ```bash + swarms book-call + ``` + +### Run All Examples + +- **[run_all_examples.sh](examples/cli/run_all_examples.sh)** - Run multiple examples in sequence + ```bash + bash run_all_examples.sh + ``` + +## Script Structure + +Each script follows a simple pattern: + +1. **Shebang** - `#!/bin/bash` +2. **Comment** - Brief description of what the script does +3. **Single Command** - One CLI command execution + +Example: +```bash +#!/bin/bash + +# Swarms CLI - Setup Check Example +# Verify your Swarms environment setup + +swarms setup-check +``` + +## Usage Patterns + +### Basic Command Execution + +```bash +swarms [options] +``` + +### With Verbose Output + +```bash +swarms --verbose +``` + +### Environment Variables + +Set API keys before running scripts that require them: + +```bash +export OPENAI_API_KEY="your-key-here" +export ANTHROPIC_API_KEY="your-key-here" +export GOOGLE_API_KEY="your-key-here" +``` + +## Examples by Category + +### Setup & Diagnostics +- Environment setup verification +- Onboarding workflow +- API key management +- Authentication verification + +### Single Agent Operations +- Custom agent creation +- Agent configuration from YAML +- Agent loading from markdown + +### Multi-Agent Operations +- LLM Council for collaborative problem-solving +- HeavySwarm for complex analysis +- Auto-generated swarm configurations + +### Information & Help +- Feature discovery +- Help documentation +- Package management + 
+## File Paths + +All scripts are located in `examples/cli/`: + +- `examples/cli/01_setup_check.sh` +- `examples/cli/02_onboarding.sh` +- `examples/cli/03_get_api_key.sh` +- `examples/cli/04_check_login.sh` +- `examples/cli/05_create_agent.sh` +- `examples/cli/06_run_agents_yaml.sh` +- `examples/cli/07_load_markdown.sh` +- `examples/cli/08_llm_council.sh` +- `examples/cli/09_heavy_swarm.sh` +- `examples/cli/10_autoswarm.sh` +- `examples/cli/11_features.sh` +- `examples/cli/12_help.sh` +- `examples/cli/13_auto_upgrade.sh` +- `examples/cli/14_book_call.sh` +- `examples/cli/run_all_examples.sh` + +## Related Documentation + +- [CLI Reference](../../docs/swarms/cli/cli_reference.md) - Complete CLI documentation +- [Main Examples README](../README.md) - Other Swarms examples +- [Swarms Documentation](../../docs/) - Full Swarms documentation + diff --git a/examples/cli/run_all_examples.sh b/examples/cli/run_all_examples.sh new file mode 100644 index 00000000..ffd948e8 --- /dev/null +++ b/examples/cli/run_all_examples.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Swarms CLI - Run All Examples +# Run all CLI examples in sequence + +chmod +x *.sh + +swarms setup-check +swarms features +swarms help + From 8f8ed788d0522909367ecc1581c8bed6306bbc5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 10:58:11 +0000 Subject: [PATCH 39/42] Update ruff requirement from >=0.5.1,<0.14.5 to >=0.5.1,<0.14.7 Updates the requirements on [ruff](https://github.com/astral-sh/ruff) to permit the latest version. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.5.1...0.14.6) --- updated-dependencies: - dependency-name: ruff dependency-version: 0.14.6 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0336f41f..d947301a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -85,7 +85,7 @@ swarms = "swarms.cli.main:main" [tool.poetry.group.lint.dependencies] black = ">=23.1,<26.0" -ruff = ">=0.5.1,<0.14.5" +ruff = ">=0.5.1,<0.14.7" types-toml = "^0.10.8.1" types-pytz = ">=2023.3,<2026.0" types-chardet = "^5.0.4.6" From 6a079cea8d2f5a151d8d4dce78f15510156eb355 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 10:58:49 +0000 Subject: [PATCH 40/42] Update pymdown-extensions requirement from ~=10.16 to ~=10.17 Updates the requirements on [pymdown-extensions](https://github.com/facelessuser/pymdown-extensions) to permit the latest version. - [Release notes](https://github.com/facelessuser/pymdown-extensions/releases) - [Commits](https://github.com/facelessuser/pymdown-extensions/compare/10.16...10.17.1) --- updated-dependencies: - dependency-name: pymdown-extensions dependency-version: 10.17.1 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 4e9c01f7..3746a2a8 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -27,7 +27,7 @@ jinja2~=3.1 markdown~=3.10 mkdocs-material-extensions~=1.3 pygments~=2.19 -pymdown-extensions~=10.16 +pymdown-extensions~=10.17 # Requirements for plugins colorama~=0.4 From b40fc1f58ea61aa0de601f04b9b79d82fc0be1b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 11:10:00 +0000 Subject: [PATCH 41/42] Bump actions/checkout from 5 to 6 Bumps [actions/checkout](https://github.com/actions/checkout) from 5 to 6. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/RELEASE.yml | 2 +- .github/workflows/codacy.yml | 2 +- .github/workflows/code-quality-and-tests.yml | 2 +- .github/workflows/codeql.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/docs.yml | 2 +- .github/workflows/lint.yml | 2 +- .github/workflows/pyre.yml | 2 +- .github/workflows/pysa.yml | 2 +- .github/workflows/python-package.yml | 2 +- .github/workflows/test-main-features.yml | 4 ++-- .github/workflows/tests.yml | 2 +- .github/workflows/trivy.yml | 2 +- 13 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/RELEASE.yml b/.github/workflows/RELEASE.yml index 2f20cb89..4ce68db3 100644 --- a/.github/workflows/RELEASE.yml +++ b/.github/workflows/RELEASE.yml @@ -17,7 +17,7 @@ jobs: && ${{ contains(github.event.pull_request.labels.*.name, 'release') }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install poetry run: pipx install poetry==$POETRY_VERSION - name: Set up Python 3.9 diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml index 632d9e03..9473405b 100644 --- a/.github/workflows/codacy.yml +++ b/.github/workflows/codacy.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis - name: Run Codacy Analysis CLI uses: codacy/codacy-analysis-cli-action@562ee3e92b8e92df8b67e0a5ff8aa8e261919c08 diff --git 
a/.github/workflows/code-quality-and-tests.yml b/.github/workflows/code-quality-and-tests.yml index 935b0448..025ee93b 100644 --- a/.github/workflows/code-quality-and-tests.yml +++ b/.github/workflows/code-quality-and-tests.yml @@ -16,7 +16,7 @@ jobs: steps: # Step 1: Check out the repository - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 # Step 2: Set up Python - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 17ff6bb3..7bf20661 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -28,7 +28,7 @@ jobs: language: ["python"] steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Initialize CodeQL uses: github/codeql-action/init@v4 with: diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 507a2882..c71cc45e 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout repository' - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: 'Dependency Review' uses: actions/dependency-review-action@v4 # Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options. 
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b933fd6e..7a4a8e5b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,7 +9,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: actions/setup-python@v6 with: python-version: 3.11 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2128be8a..9a6a5409 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -6,7 +6,7 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 diff --git a/.github/workflows/pyre.yml b/.github/workflows/pyre.yml index 336569b5..bf5d085b 100644 --- a/.github/workflows/pyre.yml +++ b/.github/workflows/pyre.yml @@ -33,7 +33,7 @@ jobs: security-events: write runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: submodules: true diff --git a/.github/workflows/pysa.yml b/.github/workflows/pysa.yml index 5f913465..590a6432 100644 --- a/.github/workflows/pysa.yml +++ b/.github/workflows/pysa.yml @@ -35,7 +35,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: submodules: true diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 1fb79863..4adb50b3 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -21,7 +21,7 @@ jobs: python-version: ["3.10", "3.11", "3.12"] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v6 with: diff --git a/.github/workflows/test-main-features.yml b/.github/workflows/test-main-features.yml index 1ff92794..23b920ec 100644 --- a/.github/workflows/test-main-features.yml +++ b/.github/workflows/test-main-features.yml @@ -24,7 +24,7 @@ jobs: steps: - 
name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 @@ -121,7 +121,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 88a9430e..0223eb67 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index b71ef7d4..3f72ed12 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -27,7 +27,7 @@ jobs: runs-on: "ubuntu-20.04" steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Build an image from Dockerfile run: | From b29199ce6011bcd5d1ed7fe57250fae94830fd54 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 25 Nov 2025 01:19:26 -0800 Subject: [PATCH 42/42] update examples readme with links --- examples/README.md | 73 ++++++++++++++----- .../llm_council_example.py | 0 .../README.md | 0 .../agent_overview.py | 0 .../batch_example.py | 0 .../client_example.py | 0 .../hospital_team.py | 0 .../icd_ten_analysis.py | 0 .../legal_team.py | 0 .../rate_limits.py | 0 10 files changed, 54 insertions(+), 19 deletions(-) rename llm_council_example.py => examples/multi_agent/llm_council_examples/llm_council_example.py (100%) rename examples/{swarms_api_examples => swarms_api}/README.md (100%) rename examples/{swarms_api_examples => swarms_api}/agent_overview.py (100%) rename examples/{swarms_api_examples => swarms_api}/batch_example.py (100%) rename examples/{swarms_api_examples => swarms_api}/client_example.py (100%) rename examples/{swarms_api_examples => swarms_api}/hospital_team.py (100%) rename 
examples/{swarms_api_examples => swarms_api}/icd_ten_analysis.py (100%) rename examples/{swarms_api_examples => swarms_api}/legal_team.py (100%) rename examples/{swarms_api_examples => swarms_api}/rate_limits.py (100%) diff --git a/examples/README.md b/examples/README.md index ac908c7a..499b9349 100644 --- a/examples/README.md +++ b/examples/README.md @@ -6,60 +6,90 @@ This directory contains comprehensive examples demonstrating various capabilitie ### Multi-Agent Systems -- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, majority voting, orchestration examples, social algorithms, simulations, spreadsheet examples, and swarm routing. +- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, LLM council, majority voting, orchestration examples, paper implementations, sequential workflows, social algorithms, simulations, spreadsheet examples, swarm routing, and utilities. 
- [README.md](multi_agent/README.md) - Complete multi-agent examples documentation + - [duo_agent.py](multi_agent/duo_agent.py) - Two-agent collaboration example + - [llm_council_examples/](multi_agent/llm_council_examples/) - LLM Council collaboration patterns + - [caching_examples/](multi_agent/caching_examples/) - Agent caching examples ### Single Agent Systems -- **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, and vision capabilities. +- **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, vision capabilities, and MCP integration. - [README.md](single_agent/README.md) - Complete single agent examples documentation - [simple_agent.py](single_agent/simple_agent.py) - Basic single agent example + - [agent_mcp.py](single_agent/agent_mcp.py) - MCP integration example + - [rag/](single_agent/rag/) - Retrieval Augmented Generation (RAG) implementations with vector database integrations ### Tools & Integrations - **[tools/](tools/)** - Tool integration examples including agent-as-tools, base tool implementations, browser automation, Claude integration, Exa search, Firecrawl, multi-tool usage, and Stagehand integration. 
- [README.md](tools/README.md) - Complete tools examples documentation - [agent_as_tools.py](tools/agent_as_tools.py) - Using agents as tools + - [browser_use_as_tool.py](tools/browser_use_as_tool.py) - Browser automation tool + - [exa_search_agent.py](tools/exa_search_agent.py) - Exa search integration + - [firecrawl_agents_example.py](tools/firecrawl_agents_example.py) - Firecrawl integration + - [base_tool_examples/](tools/base_tool_examples/) - Base tool implementation examples + - [multii_tool_use/](tools/multii_tool_use/) - Multi-tool usage examples + - [stagehand/](tools/stagehand/) - Stagehand UI automation ### Model Integrations -- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, and Ollama implementations with concurrent processing examples and provider-specific configurations. +- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, O3, Ollama, and vLLM implementations with concurrent processing examples and provider-specific configurations. - [README.md](models/README.md) - Model integration documentation - [simple_example_ollama.py](models/simple_example_ollama.py) - Ollama integration example - [cerebas_example.py](models/cerebas_example.py) - Cerebras model example - [lumo_example.py](models/lumo_example.py) - Lumo model example + - [example_o3.py](models/example_o3.py) - O3 model example + - [gpt_5/](models/gpt_5/) - GPT-5 model examples + - [gpt_oss_examples/](models/gpt_oss_examples/) - GPT-OSS examples + - [llama4_examples/](models/llama4_examples/) - Llama 4 examples + - [main_providers/](models/main_providers/) - Main provider configurations + - [vllm/](models/vllm/) - vLLM integration examples ### API & Protocols -- **[swarms_api_examples/](swarms_api_examples/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting. 
- - [README.md](swarms_api_examples/README.md) - API examples documentation - - [client_example.py](swarms_api_examples/client_example.py) - API client example - - [batch_example.py](swarms_api_examples/batch_example.py) - Batch processing example +- **[swarms_api/](swarms_api/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting. + - [README.md](swarms_api/README.md) - API examples documentation + - [client_example.py](swarms_api/client_example.py) - API client example + - [batch_example.py](swarms_api/batch_example.py) - Batch processing example + - [hospital_team.py](swarms_api/hospital_team.py) - Hospital management team simulation + - [legal_team.py](swarms_api/legal_team.py) - Legal team collaboration example + - [icd_ten_analysis.py](swarms_api/icd_ten_analysis.py) - ICD-10 medical code analysis + - [rate_limits.py](swarms_api/rate_limits.py) - Rate limiting and throttling examples -- **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, and utility functions. +- **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, utility functions, and multi-MCP guides. - [README.md](mcp/README.md) - MCP examples documentation - [multi_mcp_example.py](mcp/multi_mcp_example.py) - Multi-MCP connection example + - [agent_examples/](mcp/agent_examples/) - Agent-based MCP examples + - [servers/](mcp/servers/) - MCP server implementations + - [mcp_utils/](mcp/mcp_utils/) - MCP utility functions + - [multi_mcp_guide/](mcp/multi_mcp_guide/) - Multi-MCP setup guides -- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, and medical AOP implementations. 
+- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, medical AOP implementations, and utility functions. - [README.md](aop_examples/README.md) - AOP examples documentation - [server.py](aop_examples/server.py) - AOP server implementation + - [client/](aop_examples/client/) - AOP client examples and agent discovery + - [discovery/](aop_examples/discovery/) - Agent discovery examples + - [medical_aop/](aop_examples/medical_aop/) - Medical AOP implementations + - [utils/](aop_examples/utils/) - AOP utility functions ### Advanced Capabilities -- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, and mixture of agents (MOA) sequential examples. +- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, mixture of agents (MOA) sequential examples, and reasoning agent router examples. - [README.md](reasoning_agents/README.md) - Reasoning agents documentation - - [example_o3.py](reasoning_agents/example_o3.py) - O3 model example - [moa_seq_example.py](reasoning_agents/moa_seq_example.py) - MOA sequential example - -- **[rag/](rag/)** - Retrieval Augmented Generation (RAG) implementations with vector database integrations including Qdrant examples. 
- - [README.md](rag/README.md) - RAG documentation - - [qdrant_rag_example.py](rag/qdrant_rag_example.py) - Qdrant RAG example + - [agent_judge_examples/](reasoning_agents/agent_judge_examples/) - Agent judge evaluation systems + - [reasoning_agent_router_examples/](reasoning_agents/reasoning_agent_router_examples/) - Reasoning agent router examples ### Guides & Tutorials -- **[guides/](guides/)** - Comprehensive guides and tutorials including generation length blog, geo guesser agent, graph workflow guide, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, and workshop examples (840_update, 850_workshop). +- **[guides/](guides/)** - Comprehensive guides and tutorials including demos, generation length blog, geo guesser agent, graph workflow guide, hackathon examples, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, workshops, x402 examples, and workshop examples (840_update, 850_workshop). - [README.md](guides/README.md) - Guides documentation - [hiearchical_marketing_team.py](guides/hiearchical_marketing_team.py) - Hierarchical marketing team example + - [demos/](guides/demos/) - Various demonstration examples + - [hackathons/](guides/hackathons/) - Hackathon project examples + - [workshops/](guides/workshops/) - Workshop examples + - [x402_examples/](guides/x402_examples/) - X402 protocol examples ### Deployment @@ -72,6 +102,11 @@ This directory contains comprehensive examples demonstrating various capabilitie - **[utils/](utils/)** - Utility functions and helper implementations including agent loader, communication examples, concurrent wrappers, miscellaneous utilities, and telemetry. 
- [README.md](utils/README.md) - Utils documentation + - [agent_loader/](utils/agent_loader/) - Agent loading utilities + - [communication_examples/](utils/communication_examples/) - Agent communication patterns + - [concurrent_wrapper_examples.py](utils/concurrent_wrapper_examples.py) - Concurrent processing wrappers + - [misc/](utils/misc/) - Miscellaneous utility functions + - [telemetry/](utils/telemetry/) - Telemetry and monitoring utilities ### User Interface @@ -97,8 +132,8 @@ This directory contains comprehensive examples demonstrating various capabilitie 5. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery 6. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/) 7. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials -8. **Need RAG?** Try [rag/qdrant_rag_example.py](rag/qdrant_rag_example.py) -9. **Want reasoning agents?** Check out [reasoning_agents/example_o3.py](reasoning_agents/example_o3.py) +8. **Need RAG?** Try [single_agent/rag/](single_agent/rag/) for RAG examples +9. 
**Want reasoning agents?** Check out [reasoning_agents/](reasoning_agents/) for reasoning agent examples ## Key Examples by Category @@ -115,7 +150,7 @@ This directory contains comprehensive examples demonstrating various capabilitie - [Simple Agent](single_agent/simple_agent.py) - Basic agent setup - [Reasoning Agents](single_agent/reasoning_agent_examples/) - Advanced reasoning patterns - [Vision Agents](single_agent/vision/multimodal_example.py) - Vision and multimodal capabilities -- [RAG Agents](single_agent/rag/qdrant_rag_example.py) - Retrieval augmented generation +- [RAG Agents](single_agent/rag/) - Retrieval augmented generation ### Tool Integrations diff --git a/llm_council_example.py b/examples/multi_agent/llm_council_examples/llm_council_example.py similarity index 100% rename from llm_council_example.py rename to examples/multi_agent/llm_council_examples/llm_council_example.py diff --git a/examples/swarms_api_examples/README.md b/examples/swarms_api/README.md similarity index 100% rename from examples/swarms_api_examples/README.md rename to examples/swarms_api/README.md diff --git a/examples/swarms_api_examples/agent_overview.py b/examples/swarms_api/agent_overview.py similarity index 100% rename from examples/swarms_api_examples/agent_overview.py rename to examples/swarms_api/agent_overview.py diff --git a/examples/swarms_api_examples/batch_example.py b/examples/swarms_api/batch_example.py similarity index 100% rename from examples/swarms_api_examples/batch_example.py rename to examples/swarms_api/batch_example.py diff --git a/examples/swarms_api_examples/client_example.py b/examples/swarms_api/client_example.py similarity index 100% rename from examples/swarms_api_examples/client_example.py rename to examples/swarms_api/client_example.py diff --git a/examples/swarms_api_examples/hospital_team.py b/examples/swarms_api/hospital_team.py similarity index 100% rename from examples/swarms_api_examples/hospital_team.py rename to 
examples/swarms_api/hospital_team.py diff --git a/examples/swarms_api_examples/icd_ten_analysis.py b/examples/swarms_api/icd_ten_analysis.py similarity index 100% rename from examples/swarms_api_examples/icd_ten_analysis.py rename to examples/swarms_api/icd_ten_analysis.py diff --git a/examples/swarms_api_examples/legal_team.py b/examples/swarms_api/legal_team.py similarity index 100% rename from examples/swarms_api_examples/legal_team.py rename to examples/swarms_api/legal_team.py diff --git a/examples/swarms_api_examples/rate_limits.py b/examples/swarms_api/rate_limits.py similarity index 100% rename from examples/swarms_api_examples/rate_limits.py rename to examples/swarms_api/rate_limits.py