From d817ee791361014dd5ba1ed8061a06cbf5eecfee Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 19:05:03 -0800 Subject: [PATCH 01/16] added run_async to agent rearrange and handeled test sequantil workflow init --- swarms/structs/agent_rearrange.py | 40 +++++++++++++++++++++++ tests/structs/test_sequential_workflow.py | 14 ++------ 2 files changed, 43 insertions(+), 11 deletions(-) diff --git a/swarms/structs/agent_rearrange.py b/swarms/structs/agent_rearrange.py index d3016de4..c962a518 100644 --- a/swarms/structs/agent_rearrange.py +++ b/swarms/structs/agent_rearrange.py @@ -908,6 +908,46 @@ class AgentRearrange: except Exception as e: self._catch_error(e) + async def run_async( + self, + task: str, + img: Optional[str] = None, + *args, + **kwargs, + ) -> Any: + """ + Asynchronously executes a task through the agent workflow. + + This method enables asynchronous execution of tasks by running the + synchronous run method in a separate thread using asyncio.to_thread. + This is ideal for integrating the agent workflow into async applications + or when you want non-blocking execution. + + Args: + task (str): The task to be executed through the agent workflow. + img (Optional[str]): Optional image input for the task. Defaults to None. + *args: Additional positional arguments passed to the run method. + **kwargs: Additional keyword arguments passed to the run method. + + Returns: + Any: The result of the task execution, format depends on output_type setting. + + Raises: + Exception: If an error occurs during task execution. + + Note: + This method uses asyncio.to_thread to run the synchronous run method + asynchronously, allowing integration with async/await patterns. 
+ """ + import asyncio + + try: + return await asyncio.to_thread( + self.run, task=task, img=img, *args, **kwargs + ) + except Exception as e: + self._catch_error(e) + def _serialize_callable( self, attr_value: Callable ) -> Dict[str, Any]: diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index 6d8f74a1..f905fe47 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -5,17 +5,9 @@ from swarms import Agent, SequentialWorkflow # Test SequentialWorkflow class def test_sequential_workflow_initialization(): - workflow = SequentialWorkflow() - assert isinstance(workflow, SequentialWorkflow) - assert len(workflow.tasks) == 0 - assert workflow.max_loops == 1 - assert workflow.autosave is False - assert ( - workflow.saved_state_filepath - == "sequential_workflow_state.json" - ) - assert workflow.restore_state_filepath is None - assert workflow.dashboard is False + # SequentialWorkflow requires agents, so expect ValueError + with pytest.raises(ValueError, match="Agents list cannot be None or empty"): + workflow = SequentialWorkflow() def test_sequential_workflow_initialization_with_agents(): From 4635903c84227334f1db0f427519b430879b451c Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 19:19:18 -0800 Subject: [PATCH 02/16] added import asyncio to the top --- swarms/structs/agent_rearrange.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/swarms/structs/agent_rearrange.py b/swarms/structs/agent_rearrange.py index c962a518..a0155ef6 100644 --- a/swarms/structs/agent_rearrange.py +++ b/swarms/structs/agent_rearrange.py @@ -1,7 +1,7 @@ import json from concurrent.futures import ThreadPoolExecutor from typing import Any, Callable, Dict, List, Optional, Union - +import asyncio from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.structs.multi_agent_exec import run_agents_concurrently @@ -939,7 
+939,6 @@ class AgentRearrange: This method uses asyncio.to_thread to run the synchronous run method asynchronously, allowing integration with async/await patterns. """ - import asyncio try: return await asyncio.to_thread( From c8e4cd68116d493fae76c116f40b3de9a2548334 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 20:55:46 -0800 Subject: [PATCH 03/16] fixed reasoning agent to not use sonnet --- swarms/agents/reasoning_agents.py | 2 +- swarms/agents/reasoning_duo.py | 2 +- tests/structs/test_reasoning_agent_router.py | 5 ++++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index 122ccb01..749002db 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -90,7 +90,7 @@ class ReasoningAgentRouter: majority_voting_prompt: Optional[str] = None, reasoning_model_name: Optional[ str - ] = "claude-3-5-sonnet-20240620", + ] = "gpt-4o", ): """ Initialize the ReasoningAgentRouter with the specified configuration. 
diff --git a/swarms/agents/reasoning_duo.py b/swarms/agents/reasoning_duo.py index c0ddc156..81fa0310 100644 --- a/swarms/agents/reasoning_duo.py +++ b/swarms/agents/reasoning_duo.py @@ -37,7 +37,7 @@ class ReasoningDuo: output_type: OutputType = "dict-all-except-first", reasoning_model_name: Optional[ str - ] = "claude-3-5-sonnet-20240620", + ] = "gpt-4o", max_loops: int = 1, *args, **kwargs, diff --git a/tests/structs/test_reasoning_agent_router.py b/tests/structs/test_reasoning_agent_router.py index cf5a8782..2507058c 100644 --- a/tests/structs/test_reasoning_agent_router.py +++ b/tests/structs/test_reasoning_agent_router.py @@ -6,6 +6,9 @@ from swarms.agents.reasoning_agents import ( ReasoningAgentInitializationError, ReasoningAgentRouter, ) +from dotenv import load_dotenv + +load_dotenv() def test_router_initialization(): @@ -55,7 +58,7 @@ def test_router_initialization(): eval=True, random_models_on=True, majority_voting_prompt="Custom voting prompt", - reasoning_model_name="claude-3-5-sonnet-20240620", + reasoning_model_name="gpt-4o", ) assert ( custom_router is not None From 59857a2d44a9dbba8cb42560ee73c9f118b1156c Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 21:08:03 -0800 Subject: [PATCH 04/16] addedf raise to hierarchical --- swarms/structs/hiearchical_swarm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 1501ccb6..873d56b1 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -914,6 +914,7 @@ class HierarchicalSwarm: logger.error( f"{error_msg}\n[TRACE] Traceback: {traceback.format_exc()}\n[BUG] If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues" ) + raise def agents_no_print(self): for agent in self.agents: From a59e8c8dc0b9f5bd8ef0bbbfba251c29440543d2 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Wed, 19 Nov 2025 21:28:21 -0800 Subject: [PATCH 05/16] removed useless 
tests --- tests/structs/test_sequential_workflow.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index f905fe47..e4a48a20 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -3,12 +3,6 @@ import pytest from swarms import Agent, SequentialWorkflow -# Test SequentialWorkflow class -def test_sequential_workflow_initialization(): - # SequentialWorkflow requires agents, so expect ValueError - with pytest.raises(ValueError, match="Agents list cannot be None or empty"): - workflow = SequentialWorkflow() - def test_sequential_workflow_initialization_with_agents(): """Test SequentialWorkflow initialization with agents""" From aeb5044bf7abf6848a8617fc1e1e112ac50830f3 Mon Sep 17 00:00:00 2001 From: Steve-Dusty <66390533+Steve-Dusty@users.noreply.github.com> Date: Thu, 20 Nov 2025 19:18:11 -0800 Subject: [PATCH 06/16] added raise e --- swarms/structs/hiearchical_swarm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 873d56b1..49b63595 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -914,7 +914,7 @@ class HierarchicalSwarm: logger.error( f"{error_msg}\n[TRACE] Traceback: {traceback.format_exc()}\n[BUG] If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues" ) - raise + raise e def agents_no_print(self): for agent in self.agents: From fea72a12e1796913e613acddb0881698da5c1ee7 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 21 Nov 2025 14:40:10 -0800 Subject: [PATCH 07/16] [FEAT][rustworkx integration into GraphWorkflow] [New Examples] --- example.py | 4 +- examples/aop_examples/server.py | 8 +- .../rustworkx_examples/01_basic_usage.py | 46 ++ .../02_backend_comparison.py | 56 ++ .../03_fan_out_fan_in_patterns.py | 73 ++ 
.../rustworkx_examples/04_complex_workflow.py | 101 +++ .../05_performance_benchmark.py | 104 +++ .../rustworkx_examples/06_error_handling.py | 55 ++ .../07_large_scale_workflow.py | 61 ++ .../08_parallel_chain_example.py | 73 ++ .../09_workflow_validation.py | 79 +++ .../10_real_world_scenario.py | 122 ++++ ...n_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png | Bin 0 -> 28198 bytes .../rustworkx_examples/README.md | 156 +++++ .../test_graph_workflow_rustworkx.py | 632 +++++++++++++++++ .../multi_agent/swarm_router/swarm_router.py | 1 - swarms/structs/aop.py | 11 +- swarms/structs/graph_workflow.py | 654 +++++++++++++++++- tests/structs/test_custom_agent.py | 144 +++- tests/structs/test_deep_discussion.py | 77 ++- tests/structs/test_graph_workflow.py | 552 +++++++++++++++ .../test_graph_workflow_comprehensive.py | 225 ------ tests/structs/test_multi_agent_debate.py | 312 ++++++--- 23 files changed, 3134 insertions(+), 412 deletions(-) create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py create mode 100644 
examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md create mode 100644 examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py create mode 100644 tests/structs/test_graph_workflow.py delete mode 100644 tests/structs/test_graph_workflow_comprehensive.py diff --git a/example.py b/example.py index d13636db..386b6597 100644 --- a/example.py +++ b/example.py @@ -10,7 +10,6 @@ agent = Agent( dynamic_context_window=True, streaming_on=False, top_p=None, - # stream=True, ) out = agent.run( @@ -18,5 +17,4 @@ out = agent.run( n=1, ) -for token in out: - print(token, end="", flush=True) +print(out) diff --git a/examples/aop_examples/server.py b/examples/aop_examples/server.py index adcaaa2c..b91bcbaa 100644 --- a/examples/aop_examples/server.py +++ b/examples/aop_examples/server.py @@ -92,7 +92,13 @@ financial_agent = Agent( ) # Basic usage - individual agent addition -deployer = AOP(server_name="MyAgentServer", verbose=True, port=5932, json_response=True, queue_enabled=False) +deployer = AOP( + server_name="MyAgentServer", + verbose=True, + port=5932, + json_response=True, + queue_enabled=False, +) agents = [ research_agent, diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py new file mode 100644 index 00000000..a9d0a344 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py @@ -0,0 +1,46 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in 
comprehensive research and data gathering", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Rustworkx-Basic-Workflow", + description="Basic workflow using rustworkx backend for faster graph operations", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_node(strategy_agent) + +workflow.add_edge(research_agent, analysis_agent) +workflow.add_edge(analysis_agent, strategy_agent) + +task = "Conduct a research analysis on water stocks and ETFs" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py new file mode 100644 index 00000000..35cfe83e --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py @@ -0,0 +1,56 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agents = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(5) +] + +nx_workflow = GraphWorkflow( + name="NetworkX-Workflow", + backend="networkx", + verbose=False, +) + +for agent in agents: + nx_workflow.add_node(agent) + +for i in range(len(agents) - 1): + nx_workflow.add_edge(agents[i], agents[i + 1]) + +nx_start = time.time() 
+nx_workflow.compile() +nx_compile_time = time.time() - nx_start + +rx_workflow = GraphWorkflow( + name="Rustworkx-Workflow", + backend="rustworkx", + verbose=False, +) + +for agent in agents: + rx_workflow.add_node(agent) + +for i in range(len(agents) - 1): + rx_workflow.add_edge(agents[i], agents[i + 1]) + +rx_start = time.time() +rx_workflow.compile() +rx_compile_time = time.time() - rx_start + +speedup = ( + nx_compile_time / rx_compile_time if rx_compile_time > 0 else 0 +) +print(f"NetworkX compile time: {nx_compile_time:.4f}s") +print(f"Rustworkx compile time: {rx_compile_time:.4f}s") +print(f"Speedup: {speedup:.2f}x") +print( + f"Identical layers: {nx_workflow._sorted_layers == rx_workflow._sorted_layers}" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py new file mode 100644 index 00000000..8be4fecf --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py @@ -0,0 +1,73 @@ +from swarms import Agent, GraphWorkflow + +coordinator = Agent( + agent_name="Coordinator", + agent_description="Coordinates and distributes tasks", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +tech_analyst = Agent( + agent_name="Tech-Analyst", + agent_description="Technical analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Fundamental analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +sentiment_analyst = Agent( + agent_name="Sentiment-Analyst", + agent_description="Sentiment analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +synthesis_agent = Agent( + agent_name="Synthesis-Agent", + agent_description="Synthesizes multiple analyses into final report", + 
model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Fan-Out-Fan-In-Workflow", + description="Demonstrates parallel processing patterns with rustworkx", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(coordinator) +workflow.add_node(tech_analyst) +workflow.add_node(fundamental_analyst) +workflow.add_node(sentiment_analyst) +workflow.add_node(synthesis_agent) + +workflow.add_edges_from_source( + coordinator, + [tech_analyst, fundamental_analyst, sentiment_analyst], +) + +workflow.add_edges_to_target( + [tech_analyst, fundamental_analyst, sentiment_analyst], + synthesis_agent, +) + +task = "Analyze Tesla stock from technical, fundamental, and sentiment perspectives" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") + + +workflow.visualize(view=True) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py new file mode 100644 index 00000000..4f025a71 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py @@ -0,0 +1,101 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +data_collector_1 = Agent( + agent_name="Data-Collector-1", + agent_description="Collects market data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + agent_description="Collects financial data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Performs technical analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Performs fundamental analysis", + 
model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +risk_analyst = Agent( + agent_name="Risk-Analyst", + agent_description="Performs risk analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_consultant = Agent( + agent_name="Strategy-Consultant", + agent_description="Develops strategic recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +report_writer = Agent( + agent_name="Report-Writer", + agent_description="Writes comprehensive reports", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Complex-Multi-Layer-Workflow", + description="Complex workflow with multiple layers and parallel processing", + backend="rustworkx", + verbose=False, +) + +all_agents = [ + data_collector_1, + data_collector_2, + technical_analyst, + fundamental_analyst, + risk_analyst, + strategy_consultant, + report_writer, +] + +for agent in all_agents: + workflow.add_node(agent) + +workflow.add_parallel_chain( + [data_collector_1, data_collector_2], + [technical_analyst, fundamental_analyst, risk_analyst], +) + +workflow.add_edges_to_target( + [technical_analyst, fundamental_analyst, risk_analyst], + strategy_consultant, +) + +workflow.add_edges_to_target( + [technical_analyst, fundamental_analyst, risk_analyst], + report_writer, +) + +workflow.add_edge(strategy_consultant, report_writer) + +task = "Conduct a comprehensive analysis of the renewable energy sector including market trends, financial health, and risk assessment" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py new file mode 100644 index 00000000..2b5251f7 --- /dev/null +++ 
b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py @@ -0,0 +1,104 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agents_small = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(5) +] + +agents_medium = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(20) +] + +nx_workflow_small = GraphWorkflow( + name="NetworkX-Small", + backend="networkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_small: + nx_workflow_small.add_node(agent) + +for i in range(len(agents_small) - 1): + nx_workflow_small.add_edge(agents_small[i], agents_small[i + 1]) + +nx_start = time.time() +nx_workflow_small.compile() +nx_small_time = time.time() - nx_start + +rx_workflow_small = GraphWorkflow( + name="Rustworkx-Small", + backend="rustworkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_small: + rx_workflow_small.add_node(agent) + +for i in range(len(agents_small) - 1): + rx_workflow_small.add_edge(agents_small[i], agents_small[i + 1]) + +rx_start = time.time() +rx_workflow_small.compile() +rx_small_time = time.time() - rx_start + +nx_workflow_medium = GraphWorkflow( + name="NetworkX-Medium", + backend="networkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_medium: + nx_workflow_medium.add_node(agent) + +for i in range(len(agents_medium) - 1): + nx_workflow_medium.add_edge( + agents_medium[i], agents_medium[i + 1] + ) + +nx_start = time.time() +nx_workflow_medium.compile() +nx_medium_time = time.time() - nx_start + +rx_workflow_medium = GraphWorkflow( + name="Rustworkx-Medium", + backend="rustworkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_medium: + rx_workflow_medium.add_node(agent) 
+ +for i in range(len(agents_medium) - 1): + rx_workflow_medium.add_edge( + agents_medium[i], agents_medium[i + 1] + ) + +rx_start = time.time() +rx_workflow_medium.compile() +rx_medium_time = time.time() - rx_start + +print( + f"Small (5 agents) - NetworkX: {nx_small_time:.4f}s, Rustworkx: {rx_small_time:.4f}s, Speedup: {nx_small_time/rx_small_time if rx_small_time > 0 else 0:.2f}x" +) +print( + f"Medium (20 agents) - NetworkX: {nx_medium_time:.4f}s, Rustworkx: {rx_medium_time:.4f}s, Speedup: {nx_medium_time/rx_medium_time if rx_medium_time > 0 else 0:.2f}x" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py new file mode 100644 index 00000000..3fd9f25c --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py @@ -0,0 +1,55 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +test_agent = Agent( + agent_name="Test-Agent", + agent_description="Test agent for error handling", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow_rx = GraphWorkflow( + name="Rustworkx-Workflow", + backend="rustworkx", + verbose=False, +) +workflow_rx.add_node(test_agent) + +workflow_nx = GraphWorkflow( + name="NetworkX-Workflow", + backend="networkx", + verbose=False, +) +workflow_nx.add_node(test_agent) + +workflow_default = GraphWorkflow( + name="Default-Workflow", + verbose=False, +) +workflow_default.add_node(test_agent) + +workflow_invalid = GraphWorkflow( + name="Invalid-Workflow", + backend="invalid_backend", + verbose=False, +) +workflow_invalid.add_node(test_agent) + +print( + f"Rustworkx backend: {type(workflow_rx.graph_backend).__name__}" +) +print(f"NetworkX backend: {type(workflow_nx.graph_backend).__name__}") +print( + f"Default backend: {type(workflow_default.graph_backend).__name__}" +) +print( + f"Invalid backend 
fallback: {type(workflow_invalid.graph_backend).__name__}" +) + +try: + import rustworkx as rx + + print("Rustworkx available: True") +except ImportError: + print("Rustworkx available: False") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py new file mode 100644 index 00000000..edaeef0c --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py @@ -0,0 +1,61 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +NUM_AGENTS = 30 + +agents = [ + Agent( + agent_name=f"Agent-{i:02d}", + agent_description=f"Agent number {i} in large-scale workflow", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(NUM_AGENTS) +] + +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + description=f"Large-scale workflow with {NUM_AGENTS} agents using rustworkx", + backend="rustworkx", + verbose=False, +) + +start_time = time.time() +for agent in agents: + workflow.add_node(agent) +add_nodes_time = time.time() - start_time + +start_time = time.time() +for i in range(9): + workflow.add_edge(agents[i], agents[i + 1]) + +workflow.add_edges_from_source( + agents[5], + agents[10:20], +) + +workflow.add_edges_to_target( + agents[10:20], + agents[20], +) + +for i in range(20, 29): + workflow.add_edge(agents[i], agents[i + 1]) + +add_edges_time = time.time() - start_time + +start_time = time.time() +workflow.compile() +compile_time = time.time() - start_time + +print( + f"Agents: {len(workflow.nodes)}, Edges: {len(workflow.edges)}, Layers: {len(workflow._sorted_layers)}" +) +print( + f"Node addition: {add_nodes_time:.4f}s, Edge addition: {add_edges_time:.4f}s, Compilation: {compile_time:.4f}s" +) +print( + f"Total setup: {add_nodes_time + add_edges_time + compile_time:.4f}s" +) diff --git 
a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py new file mode 100644 index 00000000..21b18d23 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py @@ -0,0 +1,73 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +data_collector_1 = Agent( + agent_name="Data-Collector-1", + agent_description="Collects market data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + agent_description="Collects financial data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_3 = Agent( + agent_name="Data-Collector-3", + agent_description="Collects news data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Performs technical analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Performs fundamental analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +sentiment_analyst = Agent( + agent_name="Sentiment-Analyst", + agent_description="Performs sentiment analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Parallel-Chain-Workflow", + description="Demonstrates parallel chain pattern with rustworkx", + backend="rustworkx", + verbose=False, +) + +sources = [data_collector_1, data_collector_2, data_collector_3] +targets = [technical_analyst, fundamental_analyst, sentiment_analyst] + +for agent in sources + targets: + workflow.add_node(agent) + +workflow.add_parallel_chain(sources, targets) + +workflow.compile() + +task = "Analyze the technology sector 
using multiple data sources and analysis methods" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py new file mode 100644 index 00000000..79c2de3d --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py @@ -0,0 +1,79 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agent_a = Agent( + agent_name="Agent-A", + agent_description="Agent A", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_b = Agent( + agent_name="Agent-B", + agent_description="Agent B", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_c = Agent( + agent_name="Agent-C", + agent_description="Agent C", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_isolated = Agent( + agent_name="Agent-Isolated", + agent_description="Isolated agent with no connections", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Validation-Workflow", + description="Workflow for validation testing", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(agent_a) +workflow.add_node(agent_b) +workflow.add_node(agent_c) +workflow.add_node(agent_isolated) + +workflow.add_edge(agent_a, agent_b) +workflow.add_edge(agent_b, agent_c) + +validation_result = workflow.validate(auto_fix=False) +print(f"Valid: {validation_result['is_valid']}") +print(f"Warnings: {len(validation_result['warnings'])}") +print(f"Errors: {len(validation_result['errors'])}") + +validation_result_fixed = workflow.validate(auto_fix=True) +print( + f"After auto-fix - Valid: {validation_result_fixed['is_valid']}" +) +print(f"Fixed: {len(validation_result_fixed['fixed'])}") 
+print(f"Entry points: {workflow.entry_points}") +print(f"End points: {workflow.end_points}") + +workflow_cycle = GraphWorkflow( + name="Cycle-Test-Workflow", + backend="rustworkx", + verbose=False, +) + +workflow_cycle.add_node(agent_a) +workflow_cycle.add_node(agent_b) +workflow_cycle.add_node(agent_c) + +workflow_cycle.add_edge(agent_a, agent_b) +workflow_cycle.add_edge(agent_b, agent_c) +workflow_cycle.add_edge(agent_c, agent_a) + +cycle_validation = workflow_cycle.validate(auto_fix=False) +print(f"Cycles detected: {len(cycle_validation.get('cycles', []))}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py new file mode 100644 index 00000000..cc6e83ff --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py @@ -0,0 +1,122 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +market_researcher = Agent( + agent_name="Market-Researcher", + agent_description="Conducts comprehensive market research and data collection", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +competitor_analyst = Agent( + agent_name="Competitor-Analyst", + agent_description="Analyzes competitor landscape and positioning", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +market_analyst = Agent( + agent_name="Market-Analyst", + agent_description="Analyzes market trends and opportunities", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +financial_analyst = Agent( + agent_name="Financial-Analyst", + agent_description="Analyzes financial metrics and projections", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +risk_analyst = Agent( + agent_name="Risk-Analyst", + agent_description="Assesses market risks and challenges", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + 
+strategy_consultant = Agent( + agent_name="Strategy-Consultant", + agent_description="Develops strategic recommendations based on all analyses", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +report_writer = Agent( + agent_name="Report-Writer", + agent_description="Compiles comprehensive market research report", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +executive_summary_writer = Agent( + agent_name="Executive-Summary-Writer", + agent_description="Creates executive summary for leadership", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Market-Research-Workflow", + description="Real-world market research workflow using rustworkx backend", + backend="rustworkx", + verbose=False, +) + +all_agents = [ + market_researcher, + competitor_analyst, + market_analyst, + financial_analyst, + risk_analyst, + strategy_consultant, + report_writer, + executive_summary_writer, +] + +for agent in all_agents: + workflow.add_node(agent) + +workflow.add_parallel_chain( + [market_researcher, competitor_analyst], + [market_analyst, financial_analyst, risk_analyst], +) + +workflow.add_edges_to_target( + [market_analyst, financial_analyst, risk_analyst], + strategy_consultant, +) + +workflow.add_edges_from_source( + strategy_consultant, + [report_writer, executive_summary_writer], +) + +workflow.add_edges_to_target( + [market_analyst, financial_analyst, risk_analyst], + report_writer, +) + +task = """ +Conduct a comprehensive market research analysis on the electric vehicle (EV) industry: +1. Research current market size, growth trends, and key players +2. Analyze competitor landscape and market positioning +3. Assess financial opportunities and investment potential +4. Evaluate risks and challenges in the EV market +5. Develop strategic recommendations +6. 
Create detailed report and executive summary +""" + +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png new file mode 100644 index 0000000000000000000000000000000000000000..d45a9a2d1512dffddda5f01c187b8136725a543e GIT binary patch literal 28198 zcmb@uWmJ`G)Hb>R=~U?yR1j&9kd#gVY3UG=knR$sTckq~=~6(tk(5>h>F!24J#+2v z{mvNg80Y61d#|w&p7pFdW?XYV*L8=fD$C;IP~spE2z+@tDK!KFB@bR=SQzk|g$cYW zc*8JNl$ApKL;m-vF)t2*xPy?F64!7~-N1&!3;& zEbDk1DsxNmme=L|_wD&yRs4x#rl!Bg%s7mtw?vM=U_HdicebqiFnw_?K2}rO!eH=m z@Ns(Ez*I`Dy6eXeDMBanoSo!Y^nveE7m2Ftte?P-#4@d6L=tjQlK(%wVYfun2n|)+ z8R_Wg__fVHk(I^V+1UvV4b3kXOioTV-e2hGh^Bk=ib&tUAOg>*;?+$7Y61yK$%x3! zks`8*3QP4bo+Dq4=y1u{SzWzNO&QYD(_7~4jm^x^e0+S0^=r%Zds)Atpkctxi7%K) zBZ`XIt^a%pDk@^B(PxKy!F~6alZ+PDhx5!oYRkxA@V%bUFilA<-TC&iA2p;++~Yhx zck#oTh8ud}m%aMQ5Vd%CcvzTCATBPBMM&5ch<%&Wea{$HWp3$5-09}mX8VP9 zZ2dZC?5e6N-ND_WuT5`tSUXoU-WZzJ!uXe^yMzjcTh|ZQhshs1-Xmq#4=pX_9NFuP zqH3F+rStUkJcWtxjc6N*8yPiITBrUqNB3jV{_@Fgwh4g^R zK20_5iDxDXlPWwrJv%e`cYQ@J>Mg*^$~sh_&J5q#q@Rj{F0vcc-2f9hJwHc95aXaHau^X1 z5)%F{(k6U8QNipusWyC0atxdB`SoN~jmst}1_3pgEtatFCAp}m=opxE?B5(V{s#}R z4ps-XeMloZUJd6)O>kzahJuOBP1QOf{LFh3Tf4gQrmFKRrro+?=(@7yQ&sbk(-gsE7)?S=xHnZ8esY#Tc;#*nd=n z(nnzmi@}c_9T8;ni;DqEOG}jXJ3)tEz#useK25c{qt8?^= zK`*%KWR1gJv&b^=s?k~}i_!dNcdHy0Wk_$|ZUZM7vunP}J#u7HN^TJr7S>o+Nf&@S zt@NiA1=EOnC$F!ql@=GXc9NU%SPZ-%B_-Y3o~))k8D`4x{@&2g5I!F9@2BhR_~hg_ z0vf?m%_6OWjb``#1=CfQq~4V1Xo8EK`rUB0xcK;XM1Ga#{5IT3GK9d~pw8JOlFRk- z)M2zxlV!BQ!wEU0WB6Li+MP(QW0)-$La9I_(OT5G!6Ko?N7Np3)5hNOUl&P|^vX!}RmCcVA+$mPc5fQjl!tSLT2ZfqN z$+59FdwYA$BDqG(%@sAT_-&`UPrXHNM|8mYQu5h!!;q2N14g5yt)0}LA!bzau`L+i zY;<(A(45pztTTC|x6W`DpG_s*zQ<=Hdy~lkV8NcnC##c~}oV 
zpL6GxrKNU=t-XDHFW?uJzwV95%Tr}||HWS1ckk=#EAF0!4WKYHpLC=1IgFiXvYxDB z+nj0iD(ii7e#nfb`eG&sX@`TN7m-|Ap7sxQzP7ZqP&V#FS`sMl^3Vy+Y*qxg99Pj^ zoVY%p*OLSvcw1jDG&MavKR=HGUTQPb@SL|vJx_Vg@qj<=$(@#~ixV#2OLxvrWW1J= zl6q30A@P8?u-|R0)Wm8K7Xy#n@ATCDg}He_u*=RAMO|H;Rd0AqjJD6tL?z2ilTXCO z$?n~|chTH-ro>Nnrcn^nbuIxAY@?|7_ZB*E?=dnK?ljNVI$?s#cC+OU4XI{lXWwCG zS6)0P5%4-@4JM?Eh6or*#s4Lq?Sk%g;$)?@_THIy`eP>yg#Ah%g;Kh}P59Ot=k;5N zxv?^{wdwlm(Oz@}4-ZcyjZjs$QES^#`BwcGgZjeldidJA{QT8p=48}*IU z>0%l7w9FFx(|Y3f4&mJ%0O(_)%*$rTZEVU+28vRFdzV-A>6Mo9}7?`$DWb>ULPsa zws&$GuJJ_>k&vMC^Yc>+*dws9vF)9mS=D52zfPLWQ6x(j^`Z2+IPN&w{8RR_pAvb= z>R^T}M4Y|TeKSaAvPI^kuZBMPPxyF0g|N9>MT++KBm&Y2qVIzcnb_4~oK<>K5*cJF zLRh`ZIAarrSS)5{=E5s&tqwM_{A-s#OFk=-U0%~#76G>r6|W~rLkMWzN-;nbL_y@! zj-g6_E%duIsz+B-Q=8k}C3JIhE7q$f@b&dAEh`gm_Pu%+8=J{D4G7@3sKv{ds0hFu z8f_JpVI#1qQ#B4~f`Wp32M6j!DiEdZ7d!C*+vG>_@bhDu;f;@vbMo_RC%m~j=SAf6 zhWNPoq63^Mb6i%asjW4EPZ!P^P`*y0A}0@B?oC>Rec|Nd^3Tg-s&ig{yVw=eHZnpo zFfahy`o}6|;$d$Si)HiS#t81qmoEzk(KpAzDWzm&OlKNAqmT->9v3hC1LXk+AE?=rcF zXT?Mf1S@i$mwkRIDHOp3G=-Fgs@2~t22F=Pi5Jd8f(nX_jn(!0-QL!Q*4f#q)8zg5 zZ9qVoSrO<@Lsg{-}ko&PGt6od44t!{xw;zI~z5LtD!O6>860b*XKJIpZe}Dhk zmFb&5fQyEj-@GX{Y^>|9=YRhEdGcc?Mu>x95Y^BSV9Od}Iyf7*sHpUox)fVSWE^@& zWX!*MJx_Rq3?yF!FZ!^%?H1~+|F9#G1x#1{Ds{Zf3=<4F1OQ)KXJ_u19tQw5w4~mS zo#}e|Sh;uU>5&}Gm$x7{t6RUvMF_d=v_nFVO-Tvs?v}eep0rCA_3=#EoPtpcyYC@> z{`^^ZvAY``5fRZ=qF4QU=H%7S&*-SAsGKev8v3=4=xPPTn1rKO+ zcfX@w8~?_XtCaHDm4Pj~s@KKURR%y$na_o*^ZGCJC%(g0v@v=6*B3A*WIUG3Q7hRf zk$-+XeX%y2N0`WN5RQGD(HKk$8G$}$WtExtl2+J^-+FSro%NPZicPPoz0PIR{%Aw9 zy{pUYdmy%SY6KMp#r)4K*&6$KiQ3ek+Q)U;_x@WXnsmfmoQi1c_0@%miHScTginCV zkSHsfPAuB4=?tT+tZZ$pEUVL!hK6Pz04~ySkfs|KcN-56uk^)_TL6u8O3Kl0{rmTK$;nt=US3gy^o0Q0WfT-}D0!_k zm=g6;e;pf0$13E=DJl-1BCZX-8aBQS3zLrx#zrSM>+b1M^O8AeDzus|x=piNIZBVC ztT{J5Cxu0ytr4ZSZ9x=}R&JF>D;>u=@rc;Tcb-gMZY=V#gDt=oH$ES*UPdEOS3py;^(5<$` zM?*scG;F%ulc1YJZB*yXEag~m#t#tkc? 
zXPDCfSGl;jPQfciX7U_dT-aPDz}V-&Z1;|jJHZ4WW?!WK9pZDTty0bqjq>wDAhThS z_Sa6ZL@w}bnw+JP0s!O6lw56}Y{KK}mw`%u_VE9d*9 z`{~|%AznqGM(V@Gy_Co-y=9dz^4gI) zK{|$3Lx!Fr@W{#r@Ky7?tq8w6bX?#3!C2M9-Z+{9;h=-RKkF~Wm5&~7ZZtL>B=q9Rk z-2C&^0-W!KjEqdN%jW3TTx)>UB7~4WD1|$rPU!}mp8|Ez1+19nj|Hf1+LfL@)%>yb z{{8#0O6xy55E4@W(m6Rgnr6JYu6-D$XZb!ZjsoGj@te7%8rv)q{l<;bs@IcNQ;9Gx zHiJ5igtZv1PFNx&G*7OtueX|RYisMR*qKN)DL&lp)A1eE784VDX=2hUdUdGodc5_# zGkN9p6r|(I$a0cO(dBWsr2xs+0(L0%Zg2j|!KH$Vl81}S1fPoU1=NY`ys9)pu8}Cl zC%dy205ePU_%+{o{Q-DGo-t?v7rgX3-ZFu5!8kZL*lNA)&DD`MfJq&Dd(O#fJGwMs z5AH>K0V}4;U%$M_lnp+nruIPfrt{0-NjHW@Mvl>A9rlgr@8cnGik1@trs z^#4IeXaz)shd*=>#KpxuKKf&1U5VFOp-BJbsg={~o>$*KAOSD3(g5`E+}&LOHh>Pf z{S*`wl*4V!X*~^}M^^zPEl>QT+(*{iqUPECVXLb=5tEuY8V7bRba?lLftBSybjsJyhc?q!4;#>U2mJgL_lFy|kE zr{3NTTFm3PuhJy+(_A=8BKantvnTP0W(*=7V8p#lr;nPNTEjnU?{%YFfVy;vipL4f z+1&5xN82u%hK96Iw?@Y?shI9e)t*BBbbx4&3|vr?hDb5|6qttPzj^bfze!tgK`=h$ zBdqiDbDe724_0Fzgxo%N7sFqQb=K8|08)ydk!| znr&*F*n_1F?J#Zc?6gX^oo(`g4nQ)$9W8i^iIo+OtgLMJvZKv-Il5ylHG=5&Z8Y|z zqQrR;1gx*dzpkz>Gz55CzQUtbsFcUQSrkQKDk$$7Agd+tL<}hY1A~GxA>_OoEvA6q zXq^Y6Xc#Er(}_|;^8|qq5fQO-FWthnpXpqV){@WQ*y*fWaS+YPQ#R-zW)~&S4d8jvP)DJG$9)L-> z2bRgITY&{ll18r+83_rAf??8hV*`Et-@!tHg2;A8Q0IC>W23{t%G0P_i7+V!cMlKx zJ9iL%o)353EEVX#^w%Y6$fRxCn3|#@x_|t5md3}zqIeXLb&sU>zgW-)u^`$5YBk-8 zSN^ka{u#Umd7$_JQ!}fGaa9$Bf=vuz~W*P6N3RyA~p&d zW^-QS?3WL{6ykq9K}=16sr}VZGp!2RPUJ$atcau6`a1vPKM2hONe;D+c!m4cVDH~5 zE6txhd&bJf7S;bjC~19j^Bb6pb^6M`>)n6kA#En$=#gs9(wI1u; zy8&!QP3imY!pG2+WSG>scZ_h9bRVV7DiM1<$WA=%W;a3B;6_(b3WWO&3-a5=>+z6qmeDwX{fa@$t=} zxWYp~^TWi*dmsHawEVay(!G%lAE-DX1w!f~y?L|M+s8-qpt8DJ+Y7~Yp*;-mNvxYc z&I4kc|C%xoJi!ckczMkM%XZlHy@8eqpVNxcXp@guZc$M$f)iYu59}}z3Oy}85s?r0 zw8F`-x1IC$$q5N07uOfNBRv`#_P<^VK>=6}MHX@^5S;!`S|Hwjp)_)*A|p_SyqU8t zq?SjW9Z%t+;GSA3op%PQBb~ zBTVM8AaEC^g&>D?6=<8^f>QJA=y{1zGcDM4ZnPqH!T1A#*zju84xGL;enN<-fU@OO zRJvf-%OJ*Xe4&Jt2VqxtdgM-EeB|5P1=I>Tx1a)+fQ%FJXmeRU&H@m{w5juyl5FO1T~lUH$ueJ8WG4$2SBoH@@iR#IDH4-v1#U z{(u4}=hoBy)H^K&OqW|ik|BCkhRj%JEvo8{YK^yM@0o5l9@QlI(%G&&owuGu>1S;) 
z&S%NrDb%E>m57#2DXV$!h+eI>+tN9NVZ=ujMXEJ3V!$g=h>OmoCK9g{)|^#Mzp#!X z@Z0H0=p<21vS5DR$?1XYz4o=+#hHXh;iZ=G%G$KE9EI?t4ddT*j8Yc$7yj+9rzTdZNGc*U1-rt zV1v(rtA4FJdTniO^1&i?cXqjBzY2TYT_&cysiTF1Q}S6;R<$6@G0Miqse`-Od|(+5 zKipG!`;@S{Ww7bCaKA`68&g~l5hZarg$WNhh_SU@M8^Ty;-V{^w!EKU!#_^j**g3J zsc_s?c4$j|)j!yrY~-|`F2akR5eb;{`C311*h8&UwjPw{en1ME@$${oiN)& z?a*yo&$QOA^G(1~_KWmdPmFJ5eVzZB{?RcEN=ayIlQs8U=g4`~>Kkq~C{St(tPx+^ zSel33PEvkitW$yQ=5UbEyY%4llyBW!t#=SPl7X*|U+=Na*cnBQ$sx;8 zBUEEH{@uK=MRlqsDU9Q1nYnD=r7a`nbH4A1M|$@AQ_{}XfAGJIej%T*drx+d=SS@;@rK34BJM zb>~(%a%CeA@gfN*qMG?H4OZ$bzt7uq*8F%fC0`#=vt_Y7$rbq99=U3DryF!jWx~$o zn>wbovV?TvthcgK+`cg^Qr13_P(R5<`2CL8z@y^C`|?!ZVRN6Yc&34pO3SWgmg<__ zAzdQ#$yA)ci^c&7DUBNqdY)!|dt6x9_~OnIkLr%Lg7yB`p&;UM?sNa*X(11Qh5ME_ zxHq?hmfqw^x%wreEr^HA{`h0J6ecJ92cSER;7L~?mwP5;@m+zhD6bAS(! z^STn>2u6*h5W^r%B<)~7Nwb{&vTIr^27}VJERq@@o4TzzvXg`Xlx}Qj&U6loPuc@Skmzv58kHdI&Um8cw|5#*_ z?ra=zkAlTUK^Wd^cv3W4@+s@UDCE3kvQkFDQ6X#*XKZ4*n%JB6gt}kv5oS`_KN1)o z3{>6bij_*I@R|(kMt5&-0N5FUG_^$i%k90`Nm-pTcXX&z;uXwe*L&5ff7PV$N~b=c zNqMhS(sNsl3PMuCO35W#`MvS`d^8-!y0gUWQ3m$LyZ#J|FaHTpsWT~~o8jRXQ_anM z@UlXgJC2%sHI?lEgV}RGYxLoFv<$p(+bv4)p+2Y?J9kj6@XBes8B%6-_VBWHUqy;Z z#>}hTH8~}X#F8B4ladR|)7E3}vy}v`W$c0|NG}QpGRGJQ<6>=A&FUwas&~%CA2ATheE*te1Y_A8DnR?Y zb z43jopTi-00E@x4k4(HkVEv00`MN22-?6d1Zq!vK)nT1L1?mE?4(u7t2MCW;9CyCa_ zmKzAbuN~2gxZzcztcufE1x0Hq|I=jp|9Yd8dPgXs>`LHkbS{)Tm=Yc2L|Q2L(kBUQ zHTO!m%gMFD?P}m=MfEk_JcY{2D-py+@zv9I^Ak38dKVi zC-+DTuH^4cEC+hd3M-vsoDa>f&#h%89u;)7bj zZlo5S``sfMXI0m+-2OYEy02=Qk=#F`t=VsM2xD85XpB+(Qn#)d90OU24a##%S*u3t zq^~C3rSw*91i^F~M=uOp>T~$(Fr7KxXUU&xh0YjwE$;cZjg?#ArV8p0JMz@J6;}SC`B?WD?=95J&(5zJ`39rRA_f;fF7}9Nm%481jyMWB@Awt{)9)C% zDOvR53j2)cXTt4*kr!$d3mF}41Fg!h-94=YZ!O*)x#l)1%1a)K&ytUGb7I5CqC_|; zI!Vz)qawD(n=l*C4<#Bs|GC&&TiX?F7{w6%^*?dv@B`bflGe5WhwDC%^v50;umK`X^S zr>OKnVt;Q9W8gbCh+~z8@UNSwtg@0>n)S>~ zK8GVOR`G72Y>+i|7ujB!EAI5b-E4-Tg#YD=wwtk(%hL@?ioVZL=c!C$xtn~O)<(bS z@%~meczz(t)oFh)@ibl3mvSKL!%w>>Vh|Ia(LRuPgxuJyhO{Kh1EaM4xtRb4kCY`x 
z58glAM)0YAtTg{pd6Oajp|07@e=_#}jG^Z0L6Z)}fK7dfzdAcB8fmF*sBV}$ce9n8 zSUA6ZeTnO+CsB@>PC13*TfAZKEl`bM#o)#DSf0{W7|;~RC}CDkPKmG}ma zV-?Ik+o95igeW;Mraz@j&_&MlwyesX`|HiLlzn~uM7Jp40#u5;@@D0;nf2R2nZ9{) zkHFv8Ux3&Fi{?s@H0^7_o~DkK!oy&E@?Z`o-!LQX>u{<+w#ExRva-$5-ExD7 zncBa-KL-4DSlQp7V0RBT>8Z{7c%FMgzHj4SY-%l7%-dVsqWf86X*S~i`rt`pV#B~J z>G{k1cB&FqTu>VPT61(U(HPn`R(d^n zbb54Rbg)@uPMd#0K{)qvl=xiu>aS+-JIct9uzw>nRWyc8K0)pJ{m)FuWx*WVt;T!Go$hc+Wlv8_AJ{3-mirkv_-H0X#oCN zHs8Sx@gxwSd{f-+xVvKhD_D;8ZHp@6>x$u=W37PGU%bz|vXbm9wJ9Dx-_GrAmh#Ux ze->c^qN^&NMTO4T8s^VXVRGzdTi?>s3O~rF%5*~b2m!ZqYT7Pgt^OkWnY;swfxGA8 z6`jk1ypG?mMJ~$h{)i%;Y-Q5saC{3)1m5Qa-@k;kHvi)qWN8|f49{PW9OO=w#H@jCcBWYS@KmM_a zf0OCwYOfqZXZz&f`GFeQGdNn_RIz8#uHKY<^3c09|o9idntC`QIbMY*_(n|+foXpbRD{i`E@a;hNB{OrWyjuGGaP%gpwZR77g9| ze__w^7c&J%oMK8Hk{>oKaXov-+j_|7wlM zxgFkX!5eh^TWZrI1^b_k#J+EMS7cV;vS)tq^OgBXd0mmToC|W|vmM$=rStPIUxacGW&4^v`gZs> zuIw)B=?c*rPGP-gg3Nm+@x*&n95W3q0gr73YHL? z_`lA&wWTN3(mycav1fs|!?=Rn0fI1Mfhy!-$}N61yM=|KH}*o?1m!v7nmTP89e?Jt zYNdH9s$WM$yEf?_rMR71(lTar?`jo%Eh5Oh*l1IDQhnWx82~pE#%mdP^?2pq-2n8A zfNpwkEayrg|CjRRvGhOuR1F_J^)eT@?jtX2)$+wZ@NnV!h7ArR{j2cJa(wMD6kW8ScOjiF1F8E`3)gYT~z5 zi|mZU&oOcnp7BP}=JcY-nb;aflO|SaR!yCBETY~`+x(mqL9k-@e&LlqExa)ZYejqb zp6^wC*7M1B%}TPD4~mHFmCTrC(D%*pdz=w4f`Xy*ThnN>i1D+CwIspy9)F8bBc8E# zOh5>Q0v-;T+4*DUnVz*=tb#^}PB?MQFUj4M-iCGE?@r`)w(M?;rAIyG|BQRsGukhY z*%d;$U`;2oC(si8>T%sJ5xp?q*SaE^Zm&!zaNEbF&^>o=&X0W)CSP~poK>Nt@;q3l z_aRx>L>N_%w)wZBR#>%2riPkqSYij$5ifdFl^;?-y`gH)u>4o6lk+N_I4e3d_Th`u z?o?pqdI!3%X}OJTu<|>xALA`8{)xd?o%IXo{rdNfKyeLwfYP(+%DYlR#Od8$^$DM! 
z%r=^Cg(ga7V0!;iCaf2o&&uSY(HeRfXZUu$KwtL9B8tY#}HkP!N zs9?l9?zo43(%LV`%Z(xfsqd%iIXJk7BFywQ{1x5yt%z|&F%|15jJ@87>Tf#j3+sj5 z3~4_-j=zy&YQ-sT#SS4;$<&C2%RU3E$ga0#9{E9&K?I7v(zqGSMTL(I*?q-|F=Uz7&4}3ZKVuRy5Dd3Mr?I_` z2{V&KU)yStjE*aOhm%kvfA(5N?y9&MBV5b;lLM5sq}6G8Y{2j8BNK3?ltG3#HP~^ zNn^?eV-3wj*L9q|Bj0q|Q#Ep$ND^x%O^tksj& zu>2(jifcEu#*55udep=0osf;dw*xO!f|r`9?!V_@tz(e<*T^uXoCDTx1y~y4=M;u> zICcNKKhy6kRf9h*od2ftUHmahzjjGG2j3FEdA2KKuDH$}u7QuE?2@jkJWY7BU(~g4 z*W~jjp7GEwkG0x3!MOXH0YIE^Ws&w5_4->+_bY2BBoqVv!w*_&)+a31w7+K8mH49J zvV_n92^UrI6Uo+pu3}3s2c+BYZ1{!}e=T@S=7o{(#yFYu-$ih*+ZVcF;`3)ErF(WK zl9(8{b1#)cP??{LTa@PoV-qc87Huf;87E>0r11S*+ygo^`k6PV{QP}26nh87Q&9ax zGJE6qaPO{?>px?Zh7xzVo7_qQ?5)Xi*oKXnMGrpH!0kg1IEb-^hOSi6BVCi=ksFH9(7cfFTF(6^fKPj@Tj2{D{m)6j%_H1p0Mlf zfl%DYa)BG8B7Jy6#9MW?0|E6gmE*9|g--Ye8fsQQA>@d#1KtS5i3m#h5gZG@2!+qk zxIT%@dw$3nzLdqI%a&<;HcfamePDDVDRlk}7U|gPMNh}94_wH*=w#0;@82@b0Ja|x zPb)j(LETAi@br&8_+7H6ZusU89D4-b(NM=Bf#>gD$;ZNdstbk_Z!B8i4_j~2Lq_?8 zy?-q==bpgaoGbFkl34U0zI3=yR0((Qp66$BkvR!%X2qesz7X6XLeHa&L>Z%C>$-gN zg6IcD@eOL$H7h@M{B>MCLwspXF*QG+*(&( z&an7jfKdz_#`fSGT{V90w&9!q*#U9r!gdy(-$)aKs?PghP3d@zNVKkDIAzPvhha+P z&y?|kc-^hdot~na+-&Y=_Jxc*1B&~sdSN+#UbtWGvPJ4MxAEse0O)zod3rUuRrSUU zZp(ehVqTxf_fdl#(~O<(ax$ej>Lw=PuLN@PkFzPodmjc~B;X}G7`_ck$`MGCV`CO2 zhUtsD+t4114Ky1tsUv37-_JRl`kqK$(5<$r@;6^(MnSB~apYP39QOR@R|KXXE#aP> zRBHj_+Ddl7ZOMb#MKAE7y|K&Pp{fwaDMfXOR==+*3Pc%_{XfNS9QGB2-(Pz0(;`M^ zJ88Bal1}NVna|jttXX```It0r)>^H@Q7aV-n#=LwPn1?t3b#IFwnrYQO`TP5-Zk=~ znUdGc_sM|tc2o2n;a!n)6YYqfJb8up)DdyYa^W{}-+mq9RH3oi?#gS?2s-B*MZrYm z4S5u2uo&=;NYRq&Z&9>#4SqO$rVJ{^L{@wv-x7l@*1m75UFA8f`2e( z9r|o$1V{Y4m7n~D4|yN0e3nL|ooReyoZl<1EsLYfOA>hF@gDGvyy8aRn~b{LRd5hn z^=)QuyQ$!)V+|XM$}ACFI;>VAST%M0BpR0^zvG$a#%3YfjL>YLZ`69uWa`dI8nUiEd@s~8mHa6XB^sL(5X518t>>a5Wg z64_p57Oj9)IUZNrEMfRe(1Xha(4KHK%I+PsolSO&;j%J+9fWr8nhHsQK`@Wm^N?Ht%7?Yl)A^ zMeR$Aw`{ih^L+FoRMr0~INyEl|1QAvK|CS`*IjQJ^Rx1?u)E&=T7+6aLjqUDIBWZ? 
z&ly0dFX>iUtk~-4g?h|5U^Io5OH|t>5$iOEMP*2YI4!joS2 zyJ(39NhYCb?B3fRdzGuL*@F`TUac%24K4X zUBU)x+`G6q^=ZvQ4V-9p_pP>_jHh>qS~@#1*xA|l{{DSiS?S`Uz2125-n}`XF(X;& zz?`VK7EkZ`88m>|6jOu-s1jH;CKeVUz_@Ga?ZpME+H%VB#fbtf9bHU*{sUpJV?3ZZ z2Y|E!7(~W4Hh4Z)bw2B?zz6>DAqobIyZ|1$1FZoI3oD<5DWIa_k(9JFy`Uf^(5mcR zU4ubyqokzNvU*Td@3|x*pQF~`ae(pT$B)p62uWL8c4``$n?ytiGP0zNFCVp#TC3Yk z%6IPHM+e#n^5ejP!^6kV%*)fB9w~|=Hv_6vcVAz7ZxY8o=o&aXhdxW=9&U}_2A=6I z&ty4f9CA3NRvK1>jdTF`mT4=6eQSp@}n{%QqMHa4v6h7F;>VM8j3K$ye(_Z~;A zoc-S=d8DjNOG|6-=qL!374E{G7zddqks#YaNn$tn4jhdkkh>fLU8mDJ63X@l#h=T z=;#5k1;Ka}sQyn>r!^TgmEfL0K&^3FCGb4iiBkR`sOgsQN#o&73W_k0B(@9=5`t{N zIV6{~r>EyT&?`WFu|IB+VR?~tNwf=6z3bWvHJC#eP)Lx(W7xbn+k^N35FNo_kep_) zJ0Lk_i3ebeXl2O8(3$`R9QgPx@8~5rE)Txfh4(Ih28!Cy$cVIt1~CYw(m#ACs#JzG zL@FqdN)He?B%-Rbb*vuzggpbQ_u}sY^;e)_gNB4++Ll8>D>E1<@H54M0y|V@CM)>l zA%N>A_ioJtw_{?b7$*tn?!d$O#`Z3QmQS^^`z{?R5S#-81Ml$il7Qaf zL77ci>pGY{kh*1oT(h>M8FqMdgd7O4$mm5yn-mgL;^PBBVmdO$xSf3tq#^JUD!BA} ze;R+0O>Le^MleWCnw#Gwu*GSx!LwrLt+AVV${!?bZEeezA7L;-7Y{@PNTNP)tP-^3 z-w%RF0+?Tg4GbhRdtcJ^lS1z_a`b3ORBL z3eEGsa!2b!nP@j! 
zupGp!2HY`a(71Gg`>?n?Mur~XuUGQlW~`J1Zyc&|SX2;W_rE-d1|?gg>rIxf0+Mh?3zXHFchK{B_vV056m^e{a zC=8q<;P=Ut9BdE)XG`GmV-l|cmRsq-w7L^3XQlB+&?N|Bu9~_!%l35>ggi$_?rl9a z#^d8-P^tiZ>1%~~gXa+rH9^REZ;{#7*4DfD_-qIL*tvKhMrrzHnx*h_=I$#zXiVdUY%p%;<6kF z10lQ=gd2USyhTdY84z{-KvuYLvD^G+wlBiUu-P~2zG9*=?A7uHy(D9tI4h~P+=s#m zP_le38wCEh7=)j1-xI%i9J~abEK=Y>!Z&!5vwi*R`1I3axg%1V#iX461Qu~>YKqf- zt_8<({ba}W2bkSkble_q_QL$-q-=I-?d3Yh_X;_Xp5T9E!2RCVrkOE&a_tOB4p_k` zz^ut#2Y(L%$|^Dk>{d>FFE1}IHSGv@fiX%vD{s?)m^S^-m>4E!x`A`ktI`80pd zc=H=ENF2i`^#I~=cL2nE?$WU)Ok_abI3u+FxIuISyV-Z!m@Bj*gCb7^@B_9*}Hm5Y-UtX|X3sf)?3~#>K^D_Tq{zM=cBjHp3=}6M9cljh=Fu zwqxDE#7wfL?Rb6g{@6$ULUP#YS&&5LKADkx*T>vClIrG})&EwtSf>m%f|Tt$h++^3 zAS9k{eH$$GKKskiI1z90PY)yxAOe%lEr?#+HE>z3Dg14rTx~Zi(-lnvK@u0F!bqkt z@6LQ%#69lF|H}mkL8eqN=bPxEP!P#z&dkg#WT^)VXv^dz6$l1PD=MVq0w%9F7s zA0d5x{kesOw;(>~FC6&RFib?E9tJKI~G5rTO zK(Zr2*zA`?m&?I{Wg)i*{2}QynEGJhT?T8PCBp>V2;SZ)4rC0js7= zN=r-Y1*|v*Vhp5;7b%a1l+9Kx(l zhNzE5uN_D@BS6q;Mb+ee_C6>msEmxmka&iTgyH^u_*YOF2wzJ!hChj;krETPgPrG{ zUx0io1)i`?ilQ%HU0sb|o*gJouK=9*Psw3vX^CVi;~_5K_yC0|I`o6f!(o-@kU#Vq zJ$XUd+&4R$A>wl$|3SnnasSLaF)>kVp-fwC8fO>WHHPr22KeVl((@MN`2^!;GrUI4q)NFc=4iTW`?$)pulR_74kaFUBLU46JZSGYNRL+81k66Z{L3L z@}()@H~&Y9~vS8Ms5gb z;}EYZ#XuB@3X&!~Y5^kP*qU`l`bS0KBPCh}1_nqyLA%-4>^hR%?S1hs^AQ{@tS4Zg zaC}1$zuk;6D0ub`52XMs`n?|%L9ZD9m#VS6ul*;cNMX|ibXqrolL7UY7S+zyRwQtg z-90@e?hxe%gIW!(+pYxj37cjJNYCL&1z3wQ$W&0N=O1FOk(*?fPxRZ+%@n)hzm}(*qW-n4d+kX z_)n4xKeR(Nu?HghO!+uQq^1*8+Nxi+wbeoPr@U#QTs(B~JhjS0Umu7vuQKb5203aC>kqgciXlp)w{Sv%04axI zB+}s6ld>Re6$&EjF(|f>M}yG4x%7vF7+T@T4k$=Ur&dq<-c&NGE79St4mgnT) z&ul%caTXlFgvWeCu#Gde%x5t*TC)a@7>j}azPNXf&4?!Qxvq$TxOkk zqSvQpzlMjcx32g*-(MI>&VKE4)#YGdK#h`V1F3?%i3vSi38Lk4&m$}3DH^&ZFCo6H zISaxG32jb;A|dtlUO}7AZf;C>?tEIZfD{Y7`L{@+?bVf{???9qWc3fq8O5N@-oe4H zj*cKOs4T6bamW?0Ag?MXxXnP;C=VdqeLjdRS-`>chqCACo(X)J9Gv(9724tMY;*P$ z?gKVVfQtRPKgTJd94)J^=J1FD$^Q>H9EQcE zj(_*}=iw+4Bp$ngfpLqHQUN)@l@tqb&lf-5-|zbwIPi z?ZX3iIQRsckkA57$RJ@C1N{aZjiLftI^>BIptb?|WGuwWtVJj*dTO0k;RF|jA7+qv 
zOhKcGEHR-(=RzLSFqmQFI7UZ7@dE_31A2CinmNrSRo1`rZz9k4I6quRuJg(4n~a-x zWi;y2Awq(2njdeCLvix{kI|dP7w2}GvoU}FXZ}TX``x~lhDb5=j~`*+@1SJD#>ZcV zqPrM$1o^cv`@YJn;3}g6ylR?Z8ZqDW9K|GVr%PwWx;DEH{Xz_ibqA&=@d&DQypLzo`J>45Hg=Zr|2jENe_#{6vEwWJL{yZrkKbNE= zJxW;th=2NjT!NHiU+0R{E_i%wy0Bc2H*5b+GHy1i9i z4L^MZNHEZHW)FgI5)%`yxGlJ<;xDpW)BaU>myMZ#x2CP->NTuBe zV4I+pBEnP4VmKNoksb>enfSA0E4L~W0KS}^oyQz#xeEkg;oW(HmYT?rtKr*y)-~+_YmWTESU<(2YidXm6J@>d4Gc$mn zLBh`f^^}g$LIeVlkf2~<8URoTyI4yAj6}#88T&zZz%T3{2>eQbZ0-vq+h*zQjXYN0xXnKy+_P7 zBt(r4aR-by3$d zE)LNLNf`Je{r>e;4uq&)18YOR{_Wr(AkTAN`Naa*40$Ifr?*jQ-$fudu5Wo?pDM^U zVZoqvz(7RqgSP^Eh##(h;9&HCfhx7V(+090Xad>*Fz@c|1wI8YZF$9B0&^@vK+5C- z1Od>{z`IBEs$d%eIQkn5X??DdUQkdFWYZ$*xO|ynonCMgfJD*!{CFoMEG*^6WU}x@ z8t*Pp>QYcqDF9lHR``ul=%X1>!z7kWbzIHlSr*w z3FGD9dRagVia`e8@rHro86dG&ON~owsbC=hFd77;o(<^vvR}j$pqMYgXs%leqZ+3m z(Az&fnC&FY}e;+X%1zPfbF=R5C9awEg%n2pq6}l9Gf#B zvrIMmih~Ugh&%xL!d@NYnUlm=b}jM|E}w6t)Dqr`1Tl@KTt zC^$f<4`_XM9-au0Yrqzc27>~73GD!@`pc2`8Z>S4ii)nl{0&fdxidqLMtSl5r5C$) znAP(Hi~->%2SXRtJws2pp6=TK-wy~NU;sfreit@M4(NiL`1l}DzJgLtoz81Qxx2Rq z0ecES1y&Z87A(ETw1laAb~&5IlGXA*^a4O+3!+ES+L{~mz_rL%v-&P|#+H_xprzto zRT+H=lGP&W{LbA@tLNndLfgK$X7@?Z4*{nNRw#5H^EF8!JKDa+^gNOw?zxfOMMY)X z<7>_WxLt&#rWzJQeZ)~F=s%z@!s2kKX|kRdz668EM>c-m(gukSDYh<&f(sltuo%@` zO`vwX{)n0m#nj8Hwh=}k2}Z&w+Sdu?S~Y9v?m*}8&7XYjjrvj`si?_0zzk0V_1G^IH=R$?*3p>sIO(!MgV5*O{KJIWm+EV`@!Vb|lNOkV1 z-AZWt@({l&Hq3tMInw6(Glu&LB#+sYKj z;C1C~ws!N6Zd=MrrMdroh+gxk4O8N@0O9Tn(jntEJ(aki)UG_Ud~Hq#gX?5#4n)L7 z-Og<4Sq(R3jbGDHs2#rx568feL}ypu`}`U`ZW`gEh!j85|EoZ(Il+ZUE9$SjLpo1z z)R3p@9Fvaj6X`i_!+a*vs82I3PHUCiNp^9&e~NIa7-by>FF*N&)uzw4M_mndoZslL zduGXhKB`hHuIzYT;>ki6T~xl^`w)kD7a(zb__3@rUeHY}c03lI6ajAP&qhkA<5WaG zsn1g}`hdSA6Y7SN!Yuq~Zqr&T8+R_HxspDY4Jg@*_1ogTo-0%d9Npim%AB>E?#0fK zsC{_2En=(t6}V^HOP)OgFAYy%BX`km|pek&9I32&@z# zf0xe=8KbIBgepgCI?SjYcV91_v_fsq>fcK9PXS60Ng<~)mqE>?aj+12$?SP`-};}@ zo^c(2TsexaGK8=yQ6R9Qs!+HM7LHl( zAi%}%FW>v*N+easqhN66S5T33Z;=xQcONgt#hv~e6IJ@VDHRvDTf~?~p1VqdVl6I| zx#Fk1H#d$Z%s=^o}#*a@44uhCrLOrfq(PG$d7-2IIEHdIvL 
zCNtkpgwkEqyO=6{-&G6+Ll@C*S1=HtAxY*!rS@#iXQ}>m-UXqWm4L)*XNs(mk}=g4 zpKCDl1hVt@;aMg88|jf7RS_E%v>aT_n3m55E-T)Si+i!Ifgw^lVKay6Tg9^$*E4CP z*qq*dU)GtcRG2RegCC&OnQ2`s+)lD%f>94NWk)Lzti|ap-XC2s4hyeo%SdJ<^du!T z2w^S-gUTrA?kgD)osZ!AG|DN&TfwO#jSXYq?y|IsX3i_^IQG1-FYPaBJsEB?#qTO+ zEn|&!A}btxp4A3^*QcSayt4c}ZV6^K>WoOEBUV(WZP>ItVG{>4C_v{Z+4-_582IXg zi>WKQ*ji(vwd%r^!G#!gFSQ-CYATi1LMBrc3^uM8RgoPcDi0z=8zrU!g4vi%2}&w( zDP$d^nNz8uVyDRUmjwpDGv2?em>sCw(|JAKj3|1uS6%(S2@cdY=}vT*=rWHtSxiXm z=&xG$m&A?t8%)^`b#f74 z2vA$C#V&kP_4?<8FdutXR2qHcxb)M$x2Za93cwB;O9UOEicXmaU;yrv%viIA1-b6(9Fu(1% z>Ao%rA30PO{2Dn~_iXKRLPRGNp{vb z2;s2!M}ay0AM%yLFDE6O=OVsDP(kN?G?-uYKUdh!q z9iGwWGB;D>Lk}ADa}J1)?`SuD3hBQ^PgoejEC0B5nOzl`dDl1d0|}HD%oM*ktgS@x zif)8m>BOl}Wuq#P>5Gw7ZF(@hsv1FMBhI1#mbc8aMbBN#$8>&YMxwRs==Y%Kb2Nb! zbB)zuzk!$RD6ue$thUAsYi{;;F+<4TnhOc-1+Q>D8^0;c%IHMTt@qRJUYZcwqF&7- zo}-f0%0TIH_J8&joY5_M{RQ?L_fa7(mo{)NI~Cz4-NsL!NdE2flus5Nq6(Fngm);T zhIC*NWIU7csx@Qqq2i96?qS-8b}S#*E5uYy_^Nqr`z)oSOollcN$ z9j_h-s*19h#k;6$mDNe92%gH}>df9Q>bB&nEypQ_+aj>+s(G4n&FFZH?V~O9Inx)) zOg;rUJ4r(tYiXksPvn<6-#R`U`tw7?>XL=b6H|WK8J1*nmI8HjXhpD-P2p~f7fxwv z$~Trt`HGt+aP6mcK}e=~uhPop?U4Q-9wM(28Ohc*bvV6fEw+d=Jo)4+(}n-bZZH$_K>Ig0TCxN{xx63 z_#$_-ig!KD-tp4%y=k7@CayU)yur7}UnQ#qiDkD5D_G^IOPPv>K^ntO1HU)C@?+rQqH^dbPCg1Z zhe!qxsR?n0(;Z&!&UrSeFDAvy+B)DePwgU`l;Xtl!q=l9Mv&xoRMOg{#PqKKSv%P) z>S$xO=^kBH`3=r)*i3Ewe989oRr{y(B)MPV)VE@6;cQSVyN_dL)<1}AOL>wfsfZ>g zzpw7dDq>=7iQJbSzWm1mM2V!?`)t#UoXWC+7T zq(P;5{7QTFQaslAKpSjxV&qS470B7sdzUt3+EunygHJ^^t3dyH{!LyWKH%Tj= zhq3!K&Lwje<4!g0E$6!uF1NxE?vE3t(HAxwPXBvj>l<;z?$g~V8wqLZkgZuD=IkgFVlWuN2oVn#%Hy*N}4#9efV%QVD^fy zE-T9xy_8ON8Hck=EO#3t6Q$NwaPU{_SsIy5r^fd`-bkl)Gw=_!2+e=Q4k1G3FxK|8 z+b0sG-;2h4JD*+NLqlWb}tcUUCctXSeO*mRf$fkjO90Xtz7E4C6T~o-@B+Xm$#(rzz*}EAYhxA*gza^P> zn#Jew7jVgfJ80buzRJE}q~Z_D3tNOB7bT{e#FYOy?fpRUHqq|l=e%#4UwF*UvzNMR z5G~kMO8xz|YL4CjsUr&mPumDnN#+c%!yN?K7Bu@R=jvCJh~t)G;U{XpbDZLP#Jh~Z z15==J%zC9na;k91a{5LDFL6X*e0RW zqm6G{pXQDU&V;PBZ>dWku`~OL<+XYh?xoN!E#j2^eF=qMI5E8mD9OhMv8+r*wW2iR 
zoh&AI!OM6yB+2~vs{W*6Z7v}%!x!<^F5#L^xJ!I{A&7;n^!2REF3Ii!*Wj?v#e~@aj$|$KF-j< z)_;#Q>)qK50)mhFM*+<^qwH#2q>A+cd(6{$r zgFoI7dfVFeZgT(0>Vs9hbxb`8mm*h&&$>-odYZS}uEl!_sj>nrFERr zaMM0-psQax_%(s*>)!|v&V6R#|6!G}RPn zn=Y{k0lSHEPHE{U=eTHGOE;Uh)2@=o)^w4;EDIhpxfF!s?@iBXQYGq zfAa)6i(sqDgaaC?5bD?bnh5X{w>(L*-18Dt171^r7y0#@Xy1oK(X22G=Z=cC)_D-- z3(fs);KXSL`AX`Rv>RA*ekJ#mC3cKZ#(l$%bj?@DmVEV>K$5%OqoK(StxqpzRENq0 zt1kzr`n-_1Y`RpX6Hg|SNo_lvo#x@?$Idk!B^Z1xASO_Wi8|ewj8~TWi|j|Eh@|+g!1;ifU+}LSijYUC z)j1Qhjb1wkG+x&0@}uC8x$T8O3`|W$A+N9o)8c>h;^mhiA1vH*n*jatVeA}Pb9eN6)z@VC#`$WIY0n&eAD1>cNI`;?X zYTwlTKjg%-S8dM7X8N9*3DG9)O`U;nFxzYutaaM)%iS3twbSeyXs6zYNl6V545#df? z{q-RdU&%A#rQ+`sIZ4X7tq-duA|##>H%BUO_-%Sd?z-;>l$Za%6CbVZ-F;rjEBC@N zXz}ach|qWX)s@O+SlplAW?S!LVr&#$Y*uZ&ME1C!%$W&&O86WWm#7}vn<_C7{}i4f z1M|AtMs(gLUF&2+GU3X2;&ka86$*A!ehR28v@YN8aE!~xFh2}=YyZg(-pUWoa=&*I zw<9!xyQdYGD~M4^bRtx=UTOgT%I@{b2F`xp8Jr(!wmx<@dJ7)BI-Z7RE@VE*VB;5w zR+|?8>;ee|=`{I1xWsA7KN>V3q|7x8io-yr**P9vC1sXq#r)QbZcLZvqb_ELK$5HH z14+Q)>$l3(or44!Jy}{dMR;o#rs6KUZNH`6M{}xKxwaK1I?D~AkbFvP7wjJv`C_*2 z|Ni71$Tzx4uSqZuUWDtT8{GHk)So9m_BcFjI2rCEo!Bch$)MF8;2r1%OquQ_1)%b02!XCzhvU%lk4?OJmu*UOf7Zr z^o`3iJmZ2$Jbi6v(I%KGp0q`FFDnSa#-*{vZc^6;fQ*U; zhnKLyeyz z!D&ioL?5W{S}zC1C6e)t3yAm5v&zeHn1ac=Zf`&5j7iu2Tjowb?!3F-tr_ROF@s_` zQV=%Zzok=RIIxGlh$6e(lFRTmzwJ7AiD&9)3ht${k5uuggXnYP>Fs`+DB53E&$@k1 z4>kvb+6a?`zq+}*v&bKStb7ppqVRWX`(DMiqsHHz{b&(zJg6)^p5@((2n;+JlG?0A z6Sj4{5Pr9P5lt^ThqXUF6XmkC8%-w=2;o*hAoFz*2*Ix-tK0kBv$Q+kJ2OhQQ;^k( z;*_1Li%|rb<<^^vy!Io#ZBbE)6a|zevwWPJ4!jDV)J% zcM@vqGrlqjGOp!R0rM~V{f6^%K5&`6gU&r#DO>X3Ho~Q?iH6bR#Yz#_f5D&q*QqDgYk%*en-1 z?yOy!_SeNUoJ*CM)c3TJsfn(6gjk1TQ;_+IevSSouLlw>XT!>oPokPlUXR=VGxQQ< zadO#k14YDtf8c8o1{>o|s$2f&0uYFJPxR`96`ow{$I-4shpl-xF7OaRGqZHxc)B5x z?MMRQM`(C(WAJ_uMe8jW(2-Rdo(VrP`tg1RIrR2an2L&eeDc4KUG8;6Nu6Z|F03qc zRYU#S@?mTM7Y!3C|3lezT}CiEF(#gWn8#neEfO5T?vOn(-wz>99HF)?E3cHx)qLRJ z1S-6wp=9>p>|AsAD9#}fk_wHlD*g7v4K>gM$CW|-8Q?$1` 
zcfXekSqdvE4bVl;Dht(z2jZXPPy|HKr%u!O^ZrZ*u(ffUGkG^*p%-!tigM0EjjeOk+6lIJMo}~E>*2Ce3!C-yfPGQOP;}(o(x7CN>hQM9+cpGp7(F> zoAneVNb^vKHmreJ#VY zF1>~W+MrjGy4-eS3iOh&U<&wzZoY^doWELJ_IWf-$h+jliJ=dzpj|LNF=HuGEdiVF zyS?_UyN>Yl4t7$X-ch0wThniuVwYWcrh%B<`E~=a)ee-WLcvk3abK_@P^z*_ootm`zR7nuS z@#os@C5`n@@A{8NN~G0E=`hdGqH)?XB= zxPGQz^FYJwU+yt-5|Zz2K!{!WpSYrH1d%)dd#If)99EE#a8@|dEC-6mee8gR;!031b76ZA7CTnYh3Vd4rmIcs!;M#Yf?9_$@7|5t8X4IRMppnpnj&Uz zP_P)n0hY*7X|c;H??J64UcZFMXyPoNtHXiK1dZ_AK?w}H$!U@WM<$B=KQ>$^4oayonB8Isc%#2ZVUi%vWaWRr$&*ABw8RWkLBoen*)aq8f)tG?Vlq(y#GKH4erV=P6)~avKI;jK$?rIUNThe}dStvJ%I{zKeAfRDZ{arLV7Db-yhfYefO*jU3aF zYhGwjSF#&Rs4)R5uPE2Xif9&18g4j4WK@iIMp7i#<~_+>we_;HM6Cc(1@x0I{Q~Tg zk3SZcO<6nMU!?jA@5&D9@mF=JVE-!vSes>>>6p{%gotqLTZFd&y_yb-L`>5zK`0D- z+j{EcP);QY6N^Jd%>?~Kf+j*~ln;>x%uYuoTtFF?4afZdI`Ln*=%h}v-*^cAA8z>b a5*qDu712aAr4D}h4G*ROmoJeu`|y8h6YOLF literal 0 HcmV?d00001 diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md new file mode 100644 index 00000000..7292caad --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md @@ -0,0 +1,156 @@ +# Rustworkx Backend Examples + +This directory contains comprehensive examples demonstrating the use of the **rustworkx backend** in GraphWorkflow. Rustworkx provides faster graph operations compared to NetworkX, especially for large graphs and complex operations. + +## Installation + +Before running these examples, ensure rustworkx is installed: + +```bash +pip install rustworkx +``` + +If rustworkx is not installed, GraphWorkflow will automatically fallback to NetworkX backend. + +## Examples Overview + +### 01_basic_usage.py +Basic example showing how to use rustworkx backend with GraphWorkflow. Demonstrates simple linear workflow creation and execution. 
+ +**Key Concepts:** +- Initializing GraphWorkflow with rustworkx backend +- Adding agents and creating edges +- Running a workflow + +### 02_backend_comparison.py +Compares NetworkX and Rustworkx backends side-by-side, showing performance differences and functional equivalence. + +**Key Concepts:** +- Backend comparison +- Performance metrics +- Functional equivalence verification + +### 03_fan_out_fan_in_patterns.py +Demonstrates parallel processing patterns: fan-out (one-to-many) and fan-in (many-to-one) connections. + +**Key Concepts:** +- Fan-out pattern: `add_edges_from_source()` +- Fan-in pattern: `add_edges_to_target()` +- Parallel execution optimization + +### 04_complex_workflow.py +Shows a complex multi-layer workflow with multiple parallel branches and convergence points. + +**Key Concepts:** +- Multi-layer workflows +- Parallel chains: `add_parallel_chain()` +- Complex graph structures + +### 05_performance_benchmark.py +Benchmarks performance differences between NetworkX and Rustworkx for various graph sizes and structures. + +**Key Concepts:** +- Performance benchmarking +- Scalability testing +- Different graph topologies (chain, tree) + +### 06_error_handling.py +Demonstrates error handling and graceful fallback behavior when rustworkx is unavailable. + +**Key Concepts:** +- Error handling +- Automatic fallback to NetworkX +- Backend availability checking + +### 07_large_scale_workflow.py +Demonstrates rustworkx's efficiency with large-scale workflows containing many agents. + +**Key Concepts:** +- Large-scale workflows +- Performance with many nodes/edges +- Complex interconnections + +### 08_parallel_chain_example.py +Detailed example of the parallel chain pattern creating a full mesh connection. + +**Key Concepts:** +- Parallel chain pattern +- Full mesh connections +- Maximum parallelization + +### 09_workflow_validation.py +Shows workflow validation features including cycle detection, isolated nodes, and auto-fixing. 
+ +**Key Concepts:** +- Workflow validation +- Cycle detection +- Auto-fixing capabilities + +### 10_real_world_scenario.py +A realistic market research workflow demonstrating real-world agent coordination scenarios. + +**Key Concepts:** +- Real-world use case +- Complex multi-phase workflow +- Practical application + +## Quick Start + +Run any example: + +```bash +python 01_basic_usage.py +``` + +## Backend Selection + +To use rustworkx backend: + +```python +workflow = GraphWorkflow( + backend="rustworkx", # Use rustworkx + # ... other parameters +) +``` + +To use NetworkX backend (default): + +```python +workflow = GraphWorkflow( + backend="networkx", # Or omit for default + # ... other parameters +) +``` + +## Performance Benefits + +Rustworkx provides performance benefits especially for: +- **Large graphs** (100+ nodes) +- **Complex operations** (topological sorting, cycle detection) +- **Frequent graph modifications** (adding/removing nodes/edges) + +## Key Differences + +While both backends are functionally equivalent, rustworkx: +- Uses integer indices internally (abstracted away) +- Provides faster graph operations +- Better memory efficiency for large graphs +- Maintains full compatibility with GraphWorkflow API + +## Notes + +- Both backends produce identical results +- Rustworkx automatically falls back to NetworkX if not installed +- All GraphWorkflow features work with both backends +- Performance gains become more significant with larger graphs + +## Requirements + +- `swarms` package +- `rustworkx` (optional, for rustworkx backend) +- `networkx` (always available, default backend) + +## Contributing + +Feel free to add more examples demonstrating rustworkx capabilities or specific use cases! 
+ diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py new file mode 100644 index 00000000..65cc4776 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py @@ -0,0 +1,632 @@ +import pytest +from swarms.structs.graph_workflow import ( + GraphWorkflow, +) +from swarms.structs.agent import Agent + +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + + +def create_test_agent(name: str, description: str = None) -> Agent: + """Create a test agent""" + if description is None: + description = f"Test agent for {name} operations" + + return Agent( + agent_name=name, + agent_description=description, + model_name="gpt-4o-mini", + verbose=False, + print_on=False, + max_loops=1, + ) + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxBackend: + """Test suite for rustworkx backend""" + + def test_rustworkx_backend_initialization(self): + """Test that rustworkx backend is properly initialized""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + assert hasattr(workflow.graph_backend, "_node_id_to_index") + assert hasattr(workflow.graph_backend, "_index_to_node_id") + assert hasattr(workflow.graph_backend, "graph") + + def test_rustworkx_node_addition(self): + """Test adding nodes to rustworkx backend""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + agent = create_test_agent("TestAgent", "Test agent") + + workflow.add_node(agent) + + assert "TestAgent" in workflow.nodes + assert "TestAgent" in workflow.graph_backend._node_id_to_index + assert ( + workflow.graph_backend._node_id_to_index["TestAgent"] + in workflow.graph_backend._index_to_node_id 
+ ) + + def test_rustworkx_edge_addition(self): + """Test adding edges to rustworkx backend""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + assert len(workflow.edges) == 1 + assert workflow.edges[0].source == "Agent1" + assert workflow.edges[0].target == "Agent2" + + def test_rustworkx_topological_generations_linear(self): + """Test topological generations with linear chain""" + workflow = GraphWorkflow( + name="Linear-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(5) + ] + + for agent in agents: + workflow.add_node(agent) + + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 5 + assert workflow._sorted_layers[0] == ["Agent0"] + assert workflow._sorted_layers[1] == ["Agent1"] + assert workflow._sorted_layers[2] == ["Agent2"] + assert workflow._sorted_layers[3] == ["Agent3"] + assert workflow._sorted_layers[4] == ["Agent4"] + + def test_rustworkx_topological_generations_fan_out(self): + """Test topological generations with fan-out pattern""" + workflow = GraphWorkflow( + name="FanOut-Test", backend="rustworkx" + ) + coordinator = create_test_agent("Coordinator", "Coordinates") + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + + workflow.add_node(coordinator) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + + workflow.add_edges_from_source( + coordinator, [analyst1, analyst2, analyst3] + ) + + workflow.compile() + + assert len(workflow._sorted_layers) == 2 + assert len(workflow._sorted_layers[0]) == 1 + assert 
"Coordinator" in workflow._sorted_layers[0] + assert len(workflow._sorted_layers[1]) == 3 + assert "Analyst1" in workflow._sorted_layers[1] + assert "Analyst2" in workflow._sorted_layers[1] + assert "Analyst3" in workflow._sorted_layers[1] + + def test_rustworkx_topological_generations_fan_in(self): + """Test topological generations with fan-in pattern""" + workflow = GraphWorkflow( + name="FanIn-Test", backend="rustworkx" + ) + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + synthesizer = create_test_agent("Synthesizer", "Synthesizes") + + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + workflow.add_node(synthesizer) + + workflow.add_edges_to_target( + [analyst1, analyst2, analyst3], synthesizer + ) + + workflow.compile() + + assert len(workflow._sorted_layers) == 2 + assert len(workflow._sorted_layers[0]) == 3 + assert "Analyst1" in workflow._sorted_layers[0] + assert "Analyst2" in workflow._sorted_layers[0] + assert "Analyst3" in workflow._sorted_layers[0] + assert len(workflow._sorted_layers[1]) == 1 + assert "Synthesizer" in workflow._sorted_layers[1] + + def test_rustworkx_topological_generations_complex(self): + """Test topological generations with complex topology""" + workflow = GraphWorkflow( + name="Complex-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(6) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create: Agent0 -> Agent1, Agent2 + # Agent1, Agent2 -> Agent3 + # Agent3 -> Agent4, Agent5 + workflow.add_edge(agents[0], agents[1]) + workflow.add_edge(agents[0], agents[2]) + workflow.add_edge(agents[1], agents[3]) + workflow.add_edge(agents[2], agents[3]) + workflow.add_edge(agents[3], agents[4]) + workflow.add_edge(agents[3], agents[5]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 4 + 
assert "Agent0" in workflow._sorted_layers[0] + assert ( + "Agent1" in workflow._sorted_layers[1] + or "Agent2" in workflow._sorted_layers[1] + ) + assert "Agent3" in workflow._sorted_layers[2] + assert ( + "Agent4" in workflow._sorted_layers[3] + or "Agent5" in workflow._sorted_layers[3] + ) + + def test_rustworkx_predecessors(self): + """Test predecessor retrieval""" + workflow = GraphWorkflow( + name="Predecessors-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + predecessors = list( + workflow.graph_backend.predecessors("Agent2") + ) + assert "Agent1" in predecessors + assert len(predecessors) == 1 + + predecessors = list( + workflow.graph_backend.predecessors("Agent3") + ) + assert "Agent2" in predecessors + assert len(predecessors) == 1 + + predecessors = list( + workflow.graph_backend.predecessors("Agent1") + ) + assert len(predecessors) == 0 + + def test_rustworkx_descendants(self): + """Test descendant retrieval""" + workflow = GraphWorkflow( + name="Descendants-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + descendants = workflow.graph_backend.descendants("Agent1") + assert "Agent2" in descendants + assert "Agent3" in descendants + assert len(descendants) == 2 + + descendants = workflow.graph_backend.descendants("Agent2") + assert "Agent3" in descendants + assert len(descendants) == 1 + + descendants = workflow.graph_backend.descendants("Agent3") + 
assert len(descendants) == 0 + + def test_rustworkx_in_degree(self): + """Test in-degree calculation""" + workflow = GraphWorkflow( + name="InDegree-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent3, agent2) + + assert workflow.graph_backend.in_degree("Agent1") == 0 + assert workflow.graph_backend.in_degree("Agent2") == 2 + assert workflow.graph_backend.in_degree("Agent3") == 0 + + def test_rustworkx_out_degree(self): + """Test out-degree calculation""" + workflow = GraphWorkflow( + name="OutDegree-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent1, agent3) + + assert workflow.graph_backend.out_degree("Agent1") == 2 + assert workflow.graph_backend.out_degree("Agent2") == 0 + assert workflow.graph_backend.out_degree("Agent3") == 0 + + def test_rustworkx_agent_objects_in_edges(self): + """Test using Agent objects directly in edge methods""" + workflow = GraphWorkflow( + name="AgentObjects-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + # Use Agent objects directly + workflow.add_edges_from_source(agent1, [agent2, agent3]) + workflow.add_edges_to_target([agent2, agent3], agent1) + + workflow.compile() + + assert len(workflow.edges) == 4 + assert 
len(workflow._sorted_layers) >= 1 + + def test_rustworkx_parallel_chain(self): + """Test parallel chain pattern""" + workflow = GraphWorkflow( + name="ParallelChain-Test", backend="rustworkx" + ) + sources = [ + create_test_agent(f"Source{i}", f"Source {i}") + for i in range(3) + ] + targets = [ + create_test_agent(f"Target{i}", f"Target {i}") + for i in range(3) + ] + + for agent in sources + targets: + workflow.add_node(agent) + + workflow.add_parallel_chain(sources, targets) + + workflow.compile() + + assert len(workflow.edges) == 9 # 3x3 = 9 edges + assert len(workflow._sorted_layers) == 2 + + def test_rustworkx_large_scale(self): + """Test rustworkx with large workflow""" + workflow = GraphWorkflow( + name="LargeScale-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(20) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create linear chain + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 20 + assert len(workflow.nodes) == 20 + assert len(workflow.edges) == 19 + + def test_rustworkx_reverse(self): + """Test graph reversal""" + workflow = GraphWorkflow( + name="Reverse-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + reversed_backend = workflow.graph_backend.reverse() + + # In reversed graph, Agent2 should have Agent1 as predecessor + preds = list(reversed_backend.predecessors("Agent1")) + assert "Agent2" in preds + + # Agent2 should have no predecessors in reversed graph + preds = list(reversed_backend.predecessors("Agent2")) + assert len(preds) == 0 + + def test_rustworkx_entry_end_points(self): + """Test entry and end point detection""" + workflow = GraphWorkflow( + name="EntryEnd-Test", backend="rustworkx" + 
) + agent1 = create_test_agent("Agent1", "Entry agent") + agent2 = create_test_agent("Agent2", "Middle agent") + agent3 = create_test_agent("Agent3", "End agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + workflow.auto_set_entry_points() + workflow.auto_set_end_points() + + assert "Agent1" in workflow.entry_points + assert "Agent3" in workflow.end_points + assert workflow.graph_backend.in_degree("Agent1") == 0 + assert workflow.graph_backend.out_degree("Agent3") == 0 + + def test_rustworkx_isolated_nodes(self): + """Test handling of isolated nodes""" + workflow = GraphWorkflow( + name="Isolated-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "Connected agent") + agent2 = create_test_agent("Agent2", "Isolated agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent1) # Self-loop + + workflow.compile() + + assert len(workflow.nodes) == 2 + assert "Agent2" in workflow.nodes + + def test_rustworkx_workflow_execution(self): + """Test full workflow execution with rustworkx""" + workflow = GraphWorkflow( + name="Execution-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + result = workflow.run("Test task") + + assert result is not None + assert "Agent1" in result + assert "Agent2" in result + + def test_rustworkx_compilation_caching(self): + """Test that compilation is cached correctly""" + workflow = GraphWorkflow( + name="Cache-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + # First compilation + workflow.compile() + 
layers1 = workflow._sorted_layers.copy() + compiled1 = workflow._compiled + + # Second compilation should use cache + workflow.compile() + layers2 = workflow._sorted_layers.copy() + compiled2 = workflow._compiled + + assert compiled1 == compiled2 == True + assert layers1 == layers2 + + def test_rustworkx_node_metadata(self): + """Test node metadata handling""" + workflow = GraphWorkflow( + name="Metadata-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Test agent") + + workflow.add_node( + agent, metadata={"priority": "high", "timeout": 60} + ) + + node_index = workflow.graph_backend._node_id_to_index["Agent"] + node_data = workflow.graph_backend.graph[node_index] + + assert isinstance(node_data, dict) + assert node_data.get("node_id") == "Agent" + assert node_data.get("priority") == "high" + assert node_data.get("timeout") == 60 + + def test_rustworkx_edge_metadata(self): + """Test edge metadata handling""" + workflow = GraphWorkflow( + name="EdgeMetadata-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2, weight=5, label="test") + + assert len(workflow.edges) == 1 + assert workflow.edges[0].metadata.get("weight") == 5 + assert workflow.edges[0].metadata.get("label") == "test" + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxPerformance: + """Performance tests for rustworkx backend""" + + def test_rustworkx_large_graph_compilation(self): + """Test compilation performance with large graph""" + workflow = GraphWorkflow( + name="LargeGraph-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(50) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create a complex topology + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i 
+ 1]) + + import time + + start = time.time() + workflow.compile() + compile_time = time.time() - start + + assert compile_time < 1.0 # Should compile quickly + assert len(workflow._sorted_layers) == 50 + + def test_rustworkx_many_predecessors(self): + """Test performance with many predecessors""" + workflow = GraphWorkflow( + name="ManyPreds-Test", backend="rustworkx" + ) + target = create_test_agent("Target", "Target agent") + sources = [ + create_test_agent(f"Source{i}", f"Source {i}") + for i in range(100) + ] + + workflow.add_node(target) + for source in sources: + workflow.add_node(source) + + workflow.add_edges_to_target(sources, target) + + workflow.compile() + + predecessors = list( + workflow.graph_backend.predecessors("Target") + ) + assert len(predecessors) == 100 + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxEdgeCases: + """Edge case tests for rustworkx backend""" + + def test_rustworkx_empty_graph(self): + """Test empty graph handling""" + workflow = GraphWorkflow( + name="Empty-Test", backend="rustworkx" + ) + workflow.compile() + + assert len(workflow._sorted_layers) == 0 + assert len(workflow.nodes) == 0 + + def test_rustworkx_single_node(self): + """Test single node graph""" + workflow = GraphWorkflow( + name="Single-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Single agent") + + workflow.add_node(agent) + workflow.compile() + + assert len(workflow._sorted_layers) == 1 + assert workflow._sorted_layers[0] == ["Agent"] + + def test_rustworkx_self_loop(self): + """Test self-loop handling""" + workflow = GraphWorkflow( + name="SelfLoop-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Self-looping agent") + + workflow.add_node(agent) + workflow.add_edge(agent, agent) + + workflow.compile() + + assert len(workflow.edges) == 1 + assert workflow.graph_backend.in_degree("Agent") == 1 + assert workflow.graph_backend.out_degree("Agent") == 1 + + def 
test_rustworkx_duplicate_edge(self): + """Test duplicate edge handling""" + workflow = GraphWorkflow( + name="Duplicate-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + + # Add same edge twice + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent1, agent2) + + # rustworkx should handle duplicate edges + assert ( + len(workflow.edges) == 2 + ) # Both edges are stored in workflow + workflow.compile() # Should not crash + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/examples/multi_agent/swarm_router/swarm_router.py b/examples/multi_agent/swarm_router/swarm_router.py index 1801c25a..b8f73365 100644 --- a/examples/multi_agent/swarm_router/swarm_router.py +++ b/examples/multi_agent/swarm_router/swarm_router.py @@ -26,7 +26,6 @@ router = SwarmRouter( agents=agents, swarm_type="SequentialWorkflow", output_type="dict", - return_entire_history=False, ) output = router.run("How are you doing?") diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index a8f7bea4..e693a90c 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -12,8 +12,10 @@ from typing import Any, Callable, Dict, List, Literal, Optional from uuid import uuid4 from loguru import logger +from mcp.server.auth.settings import AuthSettings from mcp.server.fastmcp import FastMCP from mcp.server.lowlevel.server import LifespanResultT +from mcp.server.transport_security import TransportSecuritySettings from swarms.structs.agent import Agent from swarms.structs.omni_agent_types import AgentType @@ -21,7 +23,6 @@ from swarms.tools.mcp_client_tools import ( get_tools_for_multiple_mcp_servers, ) -from mcp.server.fastmcp import AuthSettings, TransportSecuritySettings class TaskStatus(Enum): """Status of a task in the queue.""" @@ -603,7 +604,13 @@ class AOP: log_level: Literal[ "DEBUG", "INFO", "WARNING", "ERROR", 
"CRITICAL" ] = "INFO", - lifespan: Callable[[FastMCP[LifespanResultT]], AbstractAsyncContextManager[LifespanResultT]] | None = None, + lifespan: ( + Callable[ + [FastMCP[LifespanResultT]], + AbstractAsyncContextManager[LifespanResultT], + ] + | None + ) = None, auth: AuthSettings | None = None, transport_security: TransportSecuritySettings | None = None, *args, diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index 4a2b0c90..d1a23594 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -1,10 +1,10 @@ -import json import asyncio import concurrent.futures +import json import time -from enum import Enum -from typing import Any, Dict, List, Optional import uuid +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional, Set import networkx as nx @@ -16,6 +16,14 @@ except ImportError: GRAPHVIZ_AVAILABLE = False graphviz = None +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + rx = None + from swarms.structs.agent import Agent # noqa: F401 from swarms.structs.conversation import Conversation from swarms.utils.get_cpu_cores import get_cpu_cores @@ -24,6 +32,525 @@ from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="graph_workflow") +class GraphBackend: + """ + Abstract base class for graph backends. + Provides a unified interface for different graph libraries. + """ + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node. + """ + raise NotImplementedError + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge. 
+ """ + raise NotImplementedError + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + raise NotImplementedError + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + raise NotImplementedError + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + raise NotImplementedError + + def reverse(self) -> "GraphBackend": + """ + Return a reversed copy of the graph. + + Returns: + GraphBackend: A new backend instance with reversed edges. + """ + raise NotImplementedError + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + raise NotImplementedError + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + raise NotImplementedError + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. + """ + raise NotImplementedError + + +class NetworkXBackend(GraphBackend): + """ + NetworkX backend implementation. + """ + + def __init__(self): + """ + Initialize the NetworkX backend. + """ + self.graph = nx.DiGraph() + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the NetworkX graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node. 
+ """ + self.graph.add_node(node_id, **attrs) + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the NetworkX graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge. + """ + self.graph.add_edge(source, target, **attrs) + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + return self.graph.in_degree(node_id) + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + return self.graph.out_degree(node_id) + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + return self.graph.predecessors(node_id) + + def reverse(self) -> "NetworkXBackend": + """ + Return a reversed copy of the graph. + + Returns: + NetworkXBackend: A new backend instance with reversed edges. + """ + reversed_backend = NetworkXBackend() + reversed_backend.graph = self.graph.reverse() + return reversed_backend + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + return list(nx.topological_generations(self.graph)) + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + return list(nx.simple_cycles(self.graph)) + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. 
+ """ + return nx.descendants(self.graph, node_id) + + +class RustworkxBackend(GraphBackend): + """ + Rustworkx backend implementation. + Uses integer indices internally but exposes string node IDs. + """ + + def __init__(self): + """ + Initialize the Rustworkx backend. + """ + if not RUSTWORKX_AVAILABLE: + raise ImportError( + "rustworkx is not installed. Install it with: pip install rustworkx" + ) + self.graph = rx.PyDiGraph() + # Mapping from node ID (string) to node index (int) + self._node_id_to_index: Dict[str, int] = {} + # Mapping from node index (int) to node ID (string) + self._index_to_node_id: Dict[int, str] = {} + + def _get_or_create_node_index(self, node_id: str) -> int: + """ + Get the node index for a given node ID, creating it if necessary. + + Args: + node_id (str): The node ID. + + Returns: + int: The node index. + """ + if node_id not in self._node_id_to_index: + node_index = self.graph.add_node(node_id) + self._node_id_to_index[node_id] = node_index + self._index_to_node_id[node_index] = node_id + return self._node_id_to_index[node_id] + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the Rustworkx graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node (stored in node data). + """ + if node_id not in self._node_id_to_index: + # Store node data as a dict with the node_id and attributes + node_data = {"node_id": node_id, **attrs} + node_index = self.graph.add_node(node_data) + self._node_id_to_index[node_id] = node_index + self._index_to_node_id[node_index] = node_id + else: + # Update existing node data + node_index = self._node_id_to_index[node_id] + node_data = self.graph[node_index] + if isinstance(node_data, dict): + node_data.update(attrs) + else: + self.graph[node_index] = {"node_id": node_id, **attrs} + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the Rustworkx graph. 
+ + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge (stored in edge data). + """ + source_idx = self._get_or_create_node_index(source) + target_idx = self._get_or_create_node_index(target) + edge_data = attrs if attrs else None + self.graph.add_edge(source_idx, target_idx, edge_data) + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + if node_id not in self._node_id_to_index: + return 0 + node_index = self._node_id_to_index[node_id] + return self.graph.in_degree(node_index) + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + if node_id not in self._node_id_to_index: + return 0 + node_index = self._node_id_to_index[node_id] + return self.graph.out_degree(node_index) + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + if node_id not in self._node_id_to_index: + return iter([]) + target_index = self._node_id_to_index[node_id] + # Use edge list to find predecessors (more reliable than predecessors() method) + result = [] + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + if target_idx == target_index: + result.append(self._index_to_node_id[source_idx]) + return iter(result) + + def reverse(self) -> "RustworkxBackend": + """ + Return a reversed copy of the graph. + + Returns: + RustworkxBackend: A new backend instance with reversed edges. 
+ """ + reversed_backend = RustworkxBackend() + # Copy the graph structure + reversed_backend.graph = self.graph.copy() + # Reverse the edges + reversed_backend.graph.reverse() + # Copy the mappings + reversed_backend._node_id_to_index = ( + self._node_id_to_index.copy() + ) + reversed_backend._index_to_node_id = ( + self._index_to_node_id.copy() + ) + return reversed_backend + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + try: + # Get all node indices + all_indices = list(self._node_id_to_index.values()) + if not all_indices: + return [] + + # Use layer-by-layer approach similar to NetworkX topological_generations + layers = [] + remaining = set(all_indices) + processed = set() + + while remaining: + # Find all nodes with in-degree 0 considering only edges from processed nodes + # In rustworkx, we need to check if all predecessors are in processed set + layer = [] + # First pass: identify nodes that can be added to this layer + # (without modifying remaining/processed during iteration) + nodes_to_add = [] + for idx in list(remaining): + # Get all predecessors using edge list + pred_indices = [] + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + if target_idx == idx: + pred_indices.append(source_idx) + # Check if all predecessors have been processed (or node has no predecessors) + # A node can be added to the layer if: + # 1. It has no predecessors (entry node), OR + # 2. 
All its predecessors have already been processed (from previous layers) + if not pred_indices: + # No predecessors - this is an entry node + nodes_to_add.append(idx) + elif all( + pred_idx in processed + for pred_idx in pred_indices + ): + # All predecessors have been processed in previous layers + nodes_to_add.append(idx) + + # Second pass: add identified nodes to the layer and update sets + for idx in nodes_to_add: + layer.append(self._index_to_node_id[idx]) + remaining.remove(idx) + processed.add(idx) + + if not layer: + # Cycle detected or error, break + break + + layers.append(layer) + + # If there are remaining nodes, they form a cycle - add them as a final layer + if remaining: + cycle_layer = [ + self._index_to_node_id[idx] for idx in remaining + ] + layers.append(cycle_layer) + + return ( + layers + if layers + else [ + [ + self._index_to_node_id[idx] + for idx in all_indices + ] + ] + ) + except Exception as e: + logger.warning( + f"Error in rustworkx topological_generations: {e}, falling back to simple approach" + ) + # Fallback: return all nodes in one layer + return [ + [node_id for node_id in self._node_id_to_index.keys()] + ] + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. 
+ """ + try: + # Convert to NetworkX temporarily for cycle detection + # This is a limitation of rustworkx - it doesn't have simple_cycles + # We'll use a workaround by converting temporarily + import networkx as nx + + nx_graph = nx.DiGraph() + for node_id in self._node_id_to_index.keys(): + nx_graph.add_node(node_id) + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + source_id = self._index_to_node_id[source_idx] + target_id = self._index_to_node_id[target_idx] + nx_graph.add_edge(source_id, target_id) + + cycles = list(nx.simple_cycles(nx_graph)) + return cycles + except Exception as e: + logger.warning( + f"Error in rustworkx simple_cycles: {e}, returning empty list" + ) + return [] + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. + """ + if node_id not in self._node_id_to_index: + return set() + node_index = self._node_id_to_index[node_id] + # Use BFS to find all descendants + descendants = set() + queue = [node_index] + visited = {node_index} + + while queue: + current_idx = queue.pop(0) + succ_data = self.graph.successors(current_idx) + for succ in succ_data: + # Handle both dict (node data) and int (index) returns + if isinstance(succ, dict): + succ_node_id = succ.get("node_id") + if ( + succ_node_id + and succ_node_id in self._node_id_to_index + ): + succ_idx = self._node_id_to_index[ + succ_node_id + ] + else: + continue + elif isinstance(succ, int): + succ_idx = succ + else: + continue + + if succ_idx not in visited: + visited.add(succ_idx) + descendants.add(self._index_to_node_id[succ_idx]) + queue.append(succ_idx) + + return descendants + + class NodeType(str, Enum): AGENT: Agent = "agent" @@ -129,17 +656,37 @@ class Edge: Returns: Edge: A new Edge instance. 
""" - src = ( - source_node.id - if isinstance(source_node, Node) - else source_node - ) - tgt = ( - target_node.id - if isinstance(target_node, Node) - else target_node - ) - return cls(source=src, target=tgt, **kwargs) + # Handle source node: extract ID from Node, Agent, or use string directly + if isinstance(source_node, Node): + src = source_node.id + elif hasattr(source_node, "agent_name"): + # Agent object - extract agent_name + src = getattr(source_node, "agent_name", None) + if src is None: + raise ValueError( + "Source agent does not have an agent_name attribute" + ) + else: + # Assume it's already a string ID + src = source_node + + # Handle target node: extract ID from Node, Agent, or use string directly + if isinstance(target_node, Node): + tgt = target_node.id + elif hasattr(target_node, "agent_name"): + # Agent object - extract agent_name + tgt = getattr(target_node, "agent_name", None) + if tgt is None: + raise ValueError( + "Target agent does not have an agent_name attribute" + ) + else: + # Assume it's already a string ID + tgt = target_node + + # Put all kwargs into metadata dict + metadata = kwargs if kwargs else None + return cls(source=src, target=tgt, metadata=metadata) class GraphWorkflow: @@ -151,7 +698,7 @@ class GraphWorkflow: edges (List[Edge]): A list of edges in the graph, where each edge is represented by an Edge object. entry_points (List[str]): A list of node IDs that serve as entry points to the graph. end_points (List[str]): A list of node IDs that serve as end points of the graph. - graph (nx.DiGraph): A directed graph object from the NetworkX library representing the workflow graph. + graph_backend (GraphBackend): A graph backend object (NetworkX or Rustworkx) representing the workflow graph. task (str): The task to be executed by the workflow. _compiled (bool): Whether the graph has been compiled for optimization. _sorted_layers (List[List[str]]): Pre-computed topological layers for faster execution. 
@@ -174,6 +721,7 @@ class GraphWorkflow: task: Optional[str] = None, auto_compile: bool = True, verbose: bool = False, + backend: str = "networkx", ): self.id = id self.verbose = verbose @@ -181,14 +729,30 @@ class GraphWorkflow: if self.verbose: logger.info("Initializing GraphWorkflow") logger.debug( - f"GraphWorkflow parameters: nodes={len(nodes) if nodes else 0}, edges={len(edges) if edges else 0}, max_loops={max_loops}, auto_compile={auto_compile}" + f"GraphWorkflow parameters: nodes={len(nodes) if nodes else 0}, edges={len(edges) if edges else 0}, max_loops={max_loops}, auto_compile={auto_compile}, backend={backend}" ) self.nodes = nodes or {} self.edges = edges or [] self.entry_points = entry_points or [] self.end_points = end_points or [] - self.graph = nx.DiGraph() + + # Initialize graph backend + if backend.lower() == "rustworkx": + if not RUSTWORKX_AVAILABLE: + logger.warning( + "rustworkx is not available, falling back to networkx. Install with: pip install rustworkx" + ) + self.graph_backend = NetworkXBackend() + else: + self.graph_backend = RustworkxBackend() + if self.verbose: + logger.info("Using rustworkx backend") + else: + self.graph_backend = NetworkXBackend() + if self.verbose: + logger.info("Using networkx backend") + self.max_loops = max_loops self.task = task self.name = name @@ -208,15 +772,20 @@ class GraphWorkflow: self.conversation = Conversation() - # Rebuild the NetworkX graph from nodes and edges if provided + # Rebuild the graph from nodes and edges if provided if self.nodes: + backend_name = ( + "rustworkx" + if isinstance(self.graph_backend, RustworkxBackend) + else "networkx" + ) if self.verbose: logger.info( - f"Adding {len(self.nodes)} nodes to NetworkX graph" + f"Adding {len(self.nodes)} nodes to {backend_name} graph" ) for node_id, node in self.nodes.items(): - self.graph.add_node( + self.graph_backend.add_node( node_id, type=node.type, agent=node.agent, @@ -228,9 +797,14 @@ class GraphWorkflow: ) if self.edges: + backend_name 
= ( + "rustworkx" + if isinstance(self.graph_backend, RustworkxBackend) + else "networkx" + ) if self.verbose: logger.info( - f"Adding {len(self.edges)} edges to NetworkX graph" + f"Adding {len(self.edges)} edges to {backend_name} graph" ) valid_edges = 0 @@ -239,7 +813,7 @@ class GraphWorkflow: edge.source in self.nodes and edge.target in self.nodes ): - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}), @@ -328,8 +902,8 @@ class GraphWorkflow: if self.verbose: logger.debug("Computing topological layers") - sorted_layers = list( - nx.topological_generations(self.graph) + sorted_layers = ( + self.graph_backend.topological_generations() ) self._sorted_layers = sorted_layers @@ -380,7 +954,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.nodes[node.id] = node - self.graph.add_node( + self.graph_backend.add_node( node.id, type=node.type, agent=node.agent, @@ -434,7 +1008,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) self._invalidate_compilation() @@ -492,7 +1066,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) created_edges.append(edge) @@ -560,7 +1134,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) created_edges.append(edge) @@ -629,7 +1203,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}), @@ -860,7 +1434,9 @@ class GraphWorkflow: try: self.entry_points = [ - n for n in self.nodes if self.graph.in_degree(n) == 0 + n + for n in self.nodes + if self.graph_backend.in_degree(n) 
== 0 ] if self.verbose: @@ -888,7 +1464,9 @@ class GraphWorkflow: try: self.end_points = [ - n for n in self.nodes if self.graph.out_degree(n) == 0 + n + for n in self.nodes + if self.graph_backend.out_degree(n) == 0 ] if self.verbose: @@ -921,7 +1499,7 @@ class GraphWorkflow: if node_id not in self._predecessors_cache: self._predecessors_cache[node_id] = tuple( - self.graph.predecessors(node_id) + self.graph_backend.predecessors(node_id) ) return self._predecessors_cache[node_id] @@ -2228,8 +2806,8 @@ class GraphWorkflow: isolated = [ n for n in self.nodes - if self.graph.in_degree(n) == 0 - and self.graph.out_degree(n) == 0 + if self.graph_backend.in_degree(n) == 0 + and self.graph_backend.out_degree(n) == 0 ] if isolated: result["warnings"].append( @@ -2238,7 +2816,7 @@ class GraphWorkflow: # Check for cyclic dependencies try: - cycles = list(nx.simple_cycles(self.graph)) + cycles = self.graph_backend.simple_cycles() if cycles: result["warnings"].append( f"Found {len(cycles)} cycles in workflow" @@ -2268,7 +2846,7 @@ class GraphWorkflow: reachable = set() for entry in self.entry_points: reachable.update( - nx.descendants(self.graph, entry) + self.graph_backend.descendants(entry) ) reachable.add(entry) @@ -2289,11 +2867,11 @@ class GraphWorkflow: # Check for dead-end nodes (cannot reach any exit point) if self.end_points: - reverse_graph = self.graph.reverse() + reverse_graph = self.graph_backend.reverse() reachable_to_exit = set() for exit_point in self.end_points: reachable_to_exit.update( - nx.descendants(reverse_graph, exit_point) + reverse_graph.descendants(exit_point) ) reachable_to_exit.add(exit_point) diff --git a/tests/structs/test_custom_agent.py b/tests/structs/test_custom_agent.py index 3cdeda25..63969b97 100644 --- a/tests/structs/test_custom_agent.py +++ b/tests/structs/test_custom_agent.py @@ -6,6 +6,7 @@ from swarms.structs.custom_agent import CustomAgent, AgentResponse try: import pytest_asyncio + ASYNC_AVAILABLE = True except ImportError: 
ASYNC_AVAILABLE = False @@ -40,7 +41,10 @@ def test_custom_agent_initialization(): timeout=30.0, verify_ssl=True, ) - assert custom_agent_instance.base_url == "https://api.example.com" + assert ( + custom_agent_instance.base_url + == "https://api.example.com" + ) assert custom_agent_instance.endpoint == "v1/endpoint" assert custom_agent_instance.timeout == 30.0 assert custom_agent_instance.verify_ssl is True @@ -51,7 +55,9 @@ def test_custom_agent_initialization(): raise -def test_custom_agent_initialization_with_default_headers(sample_custom_agent): +def test_custom_agent_initialization_with_default_headers( + sample_custom_agent, +): try: custom_agent_no_headers = CustomAgent( name="TestAgent", @@ -59,7 +65,9 @@ def test_custom_agent_initialization_with_default_headers(sample_custom_agent): base_url="https://api.test.com", endpoint="test", ) - assert "Content-Type" in custom_agent_no_headers.default_headers + assert ( + "Content-Type" in custom_agent_no_headers.default_headers + ) assert ( custom_agent_no_headers.default_headers["Content-Type"] == "application/json" @@ -78,7 +86,10 @@ def test_custom_agent_url_normalization(): base_url="https://api.test.com/", endpoint="/v1/test", ) - assert custom_agent_with_slashes.base_url == "https://api.test.com" + assert ( + custom_agent_with_slashes.base_url + == "https://api.test.com" + ) assert custom_agent_with_slashes.endpoint == "v1/test" logger.debug("URL normalization works correctly") except Exception as e: @@ -90,14 +101,22 @@ def test_prepare_headers(sample_custom_agent): try: prepared_headers = sample_custom_agent._prepare_headers() assert "Authorization" in prepared_headers - assert prepared_headers["Authorization"] == "Bearer test-token" + assert ( + prepared_headers["Authorization"] == "Bearer test-token" + ) additional_headers = {"X-Custom-Header": "custom-value"} prepared_headers_with_additional = ( sample_custom_agent._prepare_headers(additional_headers) ) - assert 
prepared_headers_with_additional["X-Custom-Header"] == "custom-value" - assert prepared_headers_with_additional["Authorization"] == "Bearer test-token" + assert ( + prepared_headers_with_additional["X-Custom-Header"] + == "custom-value" + ) + assert ( + prepared_headers_with_additional["Authorization"] + == "Bearer test-token" + ) logger.debug("Header preparation works correctly") except Exception as e: logger.error(f"Failed to test prepare_headers: {e}") @@ -107,7 +126,9 @@ def test_prepare_headers(sample_custom_agent): def test_prepare_payload_dict(sample_custom_agent): try: payload_dict = {"key": "value", "number": 123} - prepared_payload = sample_custom_agent._prepare_payload(payload_dict) + prepared_payload = sample_custom_agent._prepare_payload( + payload_dict + ) assert isinstance(prepared_payload, str) parsed = json.loads(prepared_payload) assert parsed["key"] == "value" @@ -121,22 +142,30 @@ def test_prepare_payload_dict(sample_custom_agent): def test_prepare_payload_string(sample_custom_agent): try: payload_string = '{"test": "value"}' - prepared_payload = sample_custom_agent._prepare_payload(payload_string) + prepared_payload = sample_custom_agent._prepare_payload( + payload_string + ) assert prepared_payload == payload_string logger.debug("String payload prepared correctly") except Exception as e: - logger.error(f"Failed to test prepare_payload with string: {e}") + logger.error( + f"Failed to test prepare_payload with string: {e}" + ) raise def test_prepare_payload_bytes(sample_custom_agent): try: payload_bytes = b'{"test": "value"}' - prepared_payload = sample_custom_agent._prepare_payload(payload_bytes) + prepared_payload = sample_custom_agent._prepare_payload( + payload_bytes + ) assert prepared_payload == payload_bytes logger.debug("Bytes payload prepared correctly") except Exception as e: - logger.error(f"Failed to test prepare_payload with bytes: {e}") + logger.error( + f"Failed to test prepare_payload with bytes: {e}" + ) raise @@ -148,7 +177,9 
@@ def test_parse_response_success(sample_custom_agent): mock_response.headers = {"content-type": "application/json"} mock_response.json.return_value = {"message": "success"} - parsed_response = sample_custom_agent._parse_response(mock_response) + parsed_response = sample_custom_agent._parse_response( + mock_response + ) assert isinstance(parsed_response, AgentResponse) assert parsed_response.status_code == 200 assert parsed_response.success is True @@ -167,7 +198,9 @@ def test_parse_response_error(sample_custom_agent): mock_response.text = "Not Found" mock_response.headers = {"content-type": "text/plain"} - parsed_response = sample_custom_agent._parse_response(mock_response) + parsed_response = sample_custom_agent._parse_response( + mock_response + ) assert isinstance(parsed_response, AgentResponse) assert parsed_response.status_code == 404 assert parsed_response.success is False @@ -189,11 +222,15 @@ def test_extract_content_openai_format(sample_custom_agent): } ] } - extracted_content = sample_custom_agent._extract_content(openai_response) + extracted_content = sample_custom_agent._extract_content( + openai_response + ) assert extracted_content == "This is the response content" logger.debug("OpenAI format content extracted correctly") except Exception as e: - logger.error(f"Failed to test extract_content OpenAI format: {e}") + logger.error( + f"Failed to test extract_content OpenAI format: {e}" + ) raise @@ -202,25 +239,33 @@ def test_extract_content_anthropic_format(sample_custom_agent): anthropic_response = { "content": [ {"text": "First part "}, - {"text": "second part"} + {"text": "second part"}, ] } - extracted_content = sample_custom_agent._extract_content(anthropic_response) + extracted_content = sample_custom_agent._extract_content( + anthropic_response + ) assert extracted_content == "First part second part" logger.debug("Anthropic format content extracted correctly") except Exception as e: - logger.error(f"Failed to test extract_content Anthropic 
format: {e}") + logger.error( + f"Failed to test extract_content Anthropic format: {e}" + ) raise def test_extract_content_generic_format(sample_custom_agent): try: generic_response = {"text": "Generic response text"} - extracted_content = sample_custom_agent._extract_content(generic_response) + extracted_content = sample_custom_agent._extract_content( + generic_response + ) assert extracted_content == "Generic response text" logger.debug("Generic format content extracted correctly") except Exception as e: - logger.error(f"Failed to test extract_content generic format: {e}") + logger.error( + f"Failed to test extract_content generic format: {e}" + ) raise @@ -229,14 +274,18 @@ def test_run_success(mock_client_class, sample_custom_agent): try: mock_response = Mock() mock_response.status_code = 200 - mock_response.text = '{"choices": [{"message": {"content": "Success"}}]}' + mock_response.text = ( + '{"choices": [{"message": {"content": "Success"}}]}' + ) mock_response.json.return_value = { "choices": [{"message": {"content": "Success"}}] } mock_response.headers = {"content-type": "application/json"} mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__enter__ = Mock( + return_value=mock_client_instance + ) mock_client_instance.__exit__ = Mock(return_value=None) mock_client_instance.post.return_value = mock_response mock_client_class.return_value = mock_client_instance @@ -259,7 +308,9 @@ def test_run_error_response(mock_client_class, sample_custom_agent): mock_response.text = "Internal Server Error" mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__enter__ = Mock( + return_value=mock_client_instance + ) mock_client_instance.__exit__ = Mock(return_value=None) mock_client_instance.post.return_value = mock_response mock_client_class.return_value = mock_client_instance @@ -280,9 +331,13 @@ def 
test_run_request_error(mock_client_class, sample_custom_agent): import httpx mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) + mock_client_instance.__enter__ = Mock( + return_value=mock_client_instance + ) mock_client_instance.__exit__ = Mock(return_value=None) - mock_client_instance.post.side_effect = httpx.RequestError("Connection failed") + mock_client_instance.post.side_effect = httpx.RequestError( + "Connection failed" + ) mock_client_class.return_value = mock_client_instance test_payload = {"message": "test"} @@ -295,23 +350,33 @@ def test_run_request_error(mock_client_class, sample_custom_agent): raise -@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") +@pytest.mark.skipif( + not ASYNC_AVAILABLE, reason="pytest-asyncio not installed" +) @pytest.mark.asyncio @patch("swarms.structs.custom_agent.httpx.AsyncClient") -async def test_run_async_success(mock_async_client_class, sample_custom_agent): +async def test_run_async_success( + mock_async_client_class, sample_custom_agent +): try: mock_response = Mock() mock_response.status_code = 200 - mock_response.text = '{"content": [{"text": "Async Success"}]}' + mock_response.text = ( + '{"content": [{"text": "Async Success"}]}' + ) mock_response.json.return_value = { "content": [{"text": "Async Success"}] } mock_response.headers = {"content-type": "application/json"} mock_client_instance = AsyncMock() - mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aenter__ = AsyncMock( + return_value=mock_client_instance + ) mock_client_instance.__aexit__ = AsyncMock(return_value=None) - mock_client_instance.post = AsyncMock(return_value=mock_response) + mock_client_instance.post = AsyncMock( + return_value=mock_response + ) mock_async_client_class.return_value = mock_client_instance test_payload = {"message": "test"} @@ -324,19 +389,27 @@ async def 
test_run_async_success(mock_async_client_class, sample_custom_agent): raise -@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") +@pytest.mark.skipif( + not ASYNC_AVAILABLE, reason="pytest-asyncio not installed" +) @pytest.mark.asyncio @patch("swarms.structs.custom_agent.httpx.AsyncClient") -async def test_run_async_error_response(mock_async_client_class, sample_custom_agent): +async def test_run_async_error_response( + mock_async_client_class, sample_custom_agent +): try: mock_response = Mock() mock_response.status_code = 400 mock_response.text = "Bad Request" mock_client_instance = AsyncMock() - mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) + mock_client_instance.__aenter__ = AsyncMock( + return_value=mock_client_instance + ) mock_client_instance.__aexit__ = AsyncMock(return_value=None) - mock_client_instance.post = AsyncMock(return_value=mock_response) + mock_client_instance.post = AsyncMock( + return_value=mock_response + ) mock_async_client_class.return_value = mock_client_instance test_payload = {"message": "test"} @@ -367,4 +440,3 @@ def test_agent_response_dataclass(): except Exception as e: logger.error(f"Failed to test AgentResponse dataclass: {e}") raise - diff --git a/tests/structs/test_deep_discussion.py b/tests/structs/test_deep_discussion.py index f83a00c5..76aecd00 100644 --- a/tests/structs/test_deep_discussion.py +++ b/tests/structs/test_deep_discussion.py @@ -6,8 +6,10 @@ from swarms.structs.agent import Agent def create_function_agent(name: str, system_prompt: str = None): if system_prompt is None: - system_prompt = f"You are {name}. Provide thoughtful responses." - + system_prompt = ( + f"You are {name}. Provide thoughtful responses." 
+ ) + agent = Agent( agent_name=name, agent_description=f"Test agent {name}", @@ -23,11 +25,11 @@ def create_function_agent(name: str, system_prompt: str = None): def sample_agents(): agent1 = create_function_agent( "Debater1", - "You are a debater who argues for the affirmative position. Be concise and direct." + "You are a debater who argues for the affirmative position. Be concise and direct.", ) agent2 = create_function_agent( "Debater2", - "You are a debater who argues for the negative position. Be concise and direct." + "You are a debater who argues for the negative position. Be concise and direct.", ) return [agent1, agent2] @@ -64,7 +66,7 @@ def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): assert result is not None assert isinstance(result, str) assert len(result) > 0 - + result_list = one_on_one_debate( max_loops=max_loops, task=sample_task, @@ -80,7 +82,9 @@ def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): raise -def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): +def test_one_on_one_debate_agent_alternation( + sample_agents, sample_task +): try: max_loops = 4 result = one_on_one_debate( @@ -92,7 +96,7 @@ def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == max_loops - + agent_names = [] for msg in result: if isinstance(msg, dict): @@ -105,8 +109,10 @@ def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): assert agent_names is not None assert len(agent_names) >= 0 if len(agent_names) > 0: - assert "Debater1" in agent_names or "Debater2" in agent_names - + assert ( + "Debater1" in agent_names or "Debater2" in agent_names + ) + if len(agent_names) > 0: debater1_count = agent_names.count("Debater1") debater2_count = agent_names.count("Debater2") @@ -137,7 +143,9 @@ def test_one_on_one_debate_with_image(sample_agents): raise -def 
test_one_on_one_debate_custom_output_types(sample_agents, sample_task): +def test_one_on_one_debate_custom_output_types( + sample_agents, sample_task +): try: output_type_checks = { "str": str, @@ -163,7 +171,9 @@ def test_one_on_one_debate_custom_output_types(sample_agents, sample_task): raise -def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): +def test_one_on_one_debate_list_output_structure( + sample_agents, sample_task +): try: result = one_on_one_debate( max_loops=2, @@ -174,7 +184,7 @@ def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + for message in result: assert message is not None assert isinstance(message, (str, dict)) @@ -191,7 +201,9 @@ def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): def test_one_on_one_debate_too_few_agents(sample_task): try: single_agent = [create_function_agent("SoloAgent")] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -210,7 +222,9 @@ def test_one_on_one_debate_too_many_agents(sample_task): create_function_agent("Agent2"), create_function_agent("Agent3"), ] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -225,7 +239,9 @@ def test_one_on_one_debate_too_many_agents(sample_task): def test_one_on_one_debate_empty_agents(sample_task): try: empty_agents = [] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -265,7 +281,9 @@ def test_one_on_one_debate_none_task(sample_agents): raise -def 
test_one_on_one_debate_invalid_output_type(sample_agents, sample_task): +def test_one_on_one_debate_invalid_output_type( + sample_agents, sample_task +): try: with pytest.raises((ValueError, TypeError)): one_on_one_debate( @@ -289,7 +307,7 @@ def test_one_on_one_debate_zero_loops(sample_agents, sample_task): ) assert result is not None assert isinstance(result, str) - + result_list = one_on_one_debate( max_loops=0, task=sample_task, @@ -327,7 +345,9 @@ def test_one_on_one_debate_different_topics(sample_agents): raise -def test_one_on_one_debate_long_conversation(sample_agents, sample_task): +def test_one_on_one_debate_long_conversation( + sample_agents, sample_task +): try: max_loops = 5 result = one_on_one_debate( @@ -349,11 +369,11 @@ def test_one_on_one_debate_different_agent_personalities(): try: agent1 = create_function_agent( "Optimist", - "You are an optimist. Always see the positive side. Be concise." + "You are an optimist. Always see the positive side. Be concise.", ) agent2 = create_function_agent( "Pessimist", - "You are a pessimist. Always see the negative side. Be concise." + "You are a pessimist. Always see the negative side. Be concise.", ) agents = [agent1, agent2] task = "What is the future of AI?" 
@@ -366,7 +386,7 @@ def test_one_on_one_debate_different_agent_personalities(): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + agent_names = [] for msg in result: if isinstance(msg, dict): @@ -379,14 +399,19 @@ def test_one_on_one_debate_different_agent_personalities(): assert agent_names is not None assert len(agent_names) >= 0 if len(agent_names) > 0: - assert "Optimist" in agent_names or "Pessimist" in agent_names + assert ( + "Optimist" in agent_names + or "Pessimist" in agent_names + ) logger.info("Different agent personalities test passed") except Exception as e: logger.error(f"Failed to test different personalities: {e}") raise -def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, sample_task): +def test_one_on_one_debate_conversation_length_matches_loops( + sample_agents, sample_task +): try: for max_loops in [1, 2, 3, 4]: result = one_on_one_debate( @@ -404,7 +429,9 @@ def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, samp raise -def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): +def test_one_on_one_debate_both_agents_participate( + sample_agents, sample_task +): try: result = one_on_one_debate( max_loops=2, @@ -415,7 +442,7 @@ def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + roles = [] for msg in result: if isinstance(msg, dict) and "role" in msg: diff --git a/tests/structs/test_graph_workflow.py b/tests/structs/test_graph_workflow.py new file mode 100644 index 00000000..a00eecb0 --- /dev/null +++ b/tests/structs/test_graph_workflow.py @@ -0,0 +1,552 @@ +import pytest +from swarms.structs.graph_workflow import ( + GraphWorkflow, + Node, + NodeType, +) +from swarms.structs.agent import Agent + +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + + +def 
create_test_agent(name: str, description: str = None) -> Agent: + """Create a real agent for testing""" + if description is None: + description = f"Test agent for {name} operations" + + return Agent( + agent_name=name, + agent_description=description, + model_name="gpt-4o-mini", + verbose=False, + print_on=False, + max_loops=1, + ) + + +def test_graph_workflow_basic_node_creation(): + """Test basic GraphWorkflow node creation with real agents""" + # Test basic node creation + agent = create_test_agent( + "TestAgent", "Test agent for node creation" + ) + node = Node.from_agent(agent) + assert node.id == "TestAgent" + assert node.type == NodeType.AGENT + assert node.agent == agent + + # Test node with custom id + node2 = Node(id="CustomID", type=NodeType.AGENT, agent=agent) + assert node2.id == "CustomID" + + +def test_graph_workflow_multi_agent_collaboration(): + """Test GraphWorkflow with multiple agents in a collaboration scenario""" + # Create specialized agents for a business analysis workflow + market_researcher = create_test_agent( + "Market-Researcher", + "Specialist in market analysis and trend identification", + ) + + data_analyst = create_test_agent( + "Data-Analyst", + "Expert in data processing and statistical analysis", + ) + + strategy_consultant = create_test_agent( + "Strategy-Consultant", + "Senior consultant for strategic planning and recommendations", + ) + + # Create workflow with linear execution path + workflow = GraphWorkflow(name="Business-Analysis-Workflow") + workflow.add_node(market_researcher) + workflow.add_node(data_analyst) + workflow.add_node(strategy_consultant) + + # Add edges to define execution order + workflow.add_edge("Market-Researcher", "Data-Analyst") + workflow.add_edge("Data-Analyst", "Strategy-Consultant") + + # Test workflow execution + result = workflow.run( + "Analyze market opportunities for AI in healthcare" + ) + assert result is not None + + +def test_graph_workflow_parallel_execution(): + """Test GraphWorkflow with 
parallel execution paths""" + # Create agents for parallel analysis + technical_analyst = create_test_agent( + "Technical-Analyst", + "Technical feasibility and implementation analysis", + ) + + market_analyst = create_test_agent( + "Market-Analyst", + "Market positioning and competitive analysis", + ) + + financial_analyst = create_test_agent( + "Financial-Analyst", "Financial modeling and ROI analysis" + ) + + risk_assessor = create_test_agent( + "Risk-Assessor", "Risk assessment and mitigation planning" + ) + + # Create workflow with parallel execution + workflow = GraphWorkflow(name="Parallel-Analysis-Workflow") + workflow.add_node(technical_analyst) + workflow.add_node(market_analyst) + workflow.add_node(financial_analyst) + workflow.add_node(risk_assessor) + + # Add edges for fan-out execution (one to many) + workflow.add_edges_from_source( + "Technical-Analyst", + ["Market-Analyst", "Financial-Analyst", "Risk-Assessor"], + ) + + # Test parallel execution + result = workflow.run( + "Evaluate feasibility of launching a new fintech platform" + ) + assert result is not None + + +def test_graph_workflow_complex_topology(): + """Test GraphWorkflow with complex node topology""" + # Create agents for a comprehensive product development workflow + product_manager = create_test_agent( + "Product-Manager", "Product strategy and roadmap management" + ) + + ux_designer = create_test_agent( + "UX-Designer", "User experience design and research" + ) + + backend_developer = create_test_agent( + "Backend-Developer", + "Backend system architecture and development", + ) + + frontend_developer = create_test_agent( + "Frontend-Developer", + "Frontend interface and user interaction development", + ) + + qa_engineer = create_test_agent( + "QA-Engineer", "Quality assurance and testing specialist" + ) + + devops_engineer = create_test_agent( + "DevOps-Engineer", "Deployment and infrastructure management" + ) + + # Create workflow with complex dependencies + workflow = 
GraphWorkflow(name="Product-Development-Workflow") + workflow.add_node(product_manager) + workflow.add_node(ux_designer) + workflow.add_node(backend_developer) + workflow.add_node(frontend_developer) + workflow.add_node(qa_engineer) + workflow.add_node(devops_engineer) + + # Define complex execution topology + workflow.add_edge("Product-Manager", "UX-Designer") + workflow.add_edge("UX-Designer", "Frontend-Developer") + workflow.add_edge("Product-Manager", "Backend-Developer") + workflow.add_edge("Backend-Developer", "QA-Engineer") + workflow.add_edge("Frontend-Developer", "QA-Engineer") + workflow.add_edge("QA-Engineer", "DevOps-Engineer") + + # Test complex workflow execution + result = workflow.run( + "Develop a comprehensive e-commerce platform with AI recommendations" + ) + assert result is not None + + +def test_graph_workflow_error_handling(): + """Test GraphWorkflow error handling and validation""" + # Test with empty workflow + workflow = GraphWorkflow() + result = workflow.run("Test task") + # Empty workflow should handle gracefully + assert result is not None + + # Test workflow compilation and caching + researcher = create_test_agent( + "Researcher", "Research specialist" + ) + workflow.add_node(researcher) + + # First run should compile + result1 = workflow.run("Research task") + assert result1 is not None + + # Second run should use cached compilation + result2 = workflow.run("Another research task") + assert result2 is not None + + +def test_graph_workflow_node_metadata(): + """Test GraphWorkflow with node metadata""" + # Create agents with different priorities and requirements + high_priority_agent = create_test_agent( + "High-Priority-Analyst", "High priority analysis specialist" + ) + + standard_agent = create_test_agent( + "Standard-Analyst", "Standard analysis agent" + ) + + # Create workflow and add nodes with metadata + workflow = GraphWorkflow(name="Metadata-Workflow") + workflow.add_node( + high_priority_agent, + metadata={"priority": "high", 
"timeout": 60}, + ) + workflow.add_node( + standard_agent, metadata={"priority": "normal", "timeout": 30} + ) + + # Add execution dependency + workflow.add_edge("High-Priority-Analyst", "Standard-Analyst") + + # Test execution with metadata + result = workflow.run( + "Analyze business requirements with different priorities" + ) + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_basic(backend): + """Test GraphWorkflow basic functionality with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow = GraphWorkflow( + name=f"Backend-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + assert len(workflow.nodes) == 2 + assert len(workflow.edges) == 1 + + result = workflow.run("Test task") + assert result is not None + assert "Agent1" in result + assert "Agent2" in result + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_parallel_execution(backend): + """Test parallel execution with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + coordinator = create_test_agent( + "Coordinator", "Coordinates tasks" + ) + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + + workflow = GraphWorkflow( + name=f"Parallel-Test-{backend}", backend=backend + ) + workflow.add_node(coordinator) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + + workflow.add_edges_from_source( + coordinator, [analyst1, analyst2, analyst3] + ) + + workflow.compile() + assert 
len(workflow._sorted_layers) >= 1 + assert ( + len(workflow._sorted_layers[0]) == 1 + ) # Coordinator in first layer + + result = workflow.run("Analyze data in parallel") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_fan_in_pattern(backend): + """Test fan-in pattern with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + synthesizer = create_test_agent( + "Synthesizer", "Synthesizes results" + ) + + workflow = GraphWorkflow( + name=f"FanIn-Test-{backend}", backend=backend + ) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + workflow.add_node(synthesizer) + + workflow.add_edges_to_target( + [analyst1, analyst2, analyst3], synthesizer + ) + + workflow.compile() + assert len(workflow._sorted_layers) >= 2 + assert synthesizer.agent_name in workflow.end_points + + result = workflow.run("Synthesize multiple analyses") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_parallel_chain(backend): + """Test parallel chain pattern with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + collector1 = create_test_agent("Collector1", "First collector") + collector2 = create_test_agent("Collector2", "Second collector") + processor1 = create_test_agent("Processor1", "First processor") + processor2 = create_test_agent("Processor2", "Second processor") + + workflow = GraphWorkflow( + name=f"ParallelChain-Test-{backend}", backend=backend + ) + workflow.add_node(collector1) + workflow.add_node(collector2) + workflow.add_node(processor1) + workflow.add_node(processor2) + + 
workflow.add_parallel_chain( + [collector1, collector2], [processor1, processor2] + ) + + workflow.compile() + assert len(workflow.edges) == 4 # 2x2 = 4 edges + + result = workflow.run("Process data from multiple collectors") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_complex_topology(backend): + """Test complex topology with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") for i in range(5) + ] + + workflow = GraphWorkflow( + name=f"Complex-Topology-{backend}", backend=backend + ) + for agent in agents: + workflow.add_node(agent) + + workflow.add_edge(agents[0], agents[1]) + workflow.add_edge(agents[0], agents[2]) + workflow.add_edge(agents[1], agents[3]) + workflow.add_edge(agents[2], agents[3]) + workflow.add_edge(agents[3], agents[4]) + + workflow.compile() + assert len(workflow._sorted_layers) >= 3 + + result = workflow.run("Execute complex workflow") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_validation(backend): + """Test workflow validation with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + isolated = create_test_agent("Isolated", "Isolated agent") + + workflow = GraphWorkflow( + name=f"Validation-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(isolated) + workflow.add_edge(agent1, agent2) + + validation = workflow.validate(auto_fix=False) + assert isinstance(validation, dict) + assert "is_valid" in validation + + validation_fixed = workflow.validate(auto_fix=True) + assert isinstance(validation_fixed, dict) + + 
+@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_entry_end_points(backend): + """Test entry and end points with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "Entry agent") + agent2 = create_test_agent("Agent2", "Middle agent") + agent3 = create_test_agent("Agent3", "End agent") + + workflow = GraphWorkflow( + name=f"EntryEnd-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + workflow.auto_set_entry_points() + workflow.auto_set_end_points() + + assert agent1.agent_name in workflow.entry_points + assert agent3.agent_name in workflow.end_points + + +def test_graph_workflow_rustworkx_specific(): + """Test rustworkx-specific features""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow = GraphWorkflow( + name="Rustworkx-Specific-Test", backend="rustworkx" + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + assert hasattr(workflow.graph_backend, "_node_id_to_index") + assert hasattr(workflow.graph_backend, "_index_to_node_id") + + workflow.compile() + assert len(workflow._sorted_layers) == 3 + + predecessors = list( + workflow.graph_backend.predecessors(agent2.agent_name) + ) + assert agent1.agent_name in predecessors + + descendants = workflow.graph_backend.descendants( + agent1.agent_name + ) + assert agent2.agent_name in descendants + assert agent3.agent_name in descendants + + 
result = workflow.run("Test rustworkx backend") + assert result is not None + + +def test_graph_workflow_rustworkx_large_scale(): + """Test rustworkx with larger workflow""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(10) + ] + + workflow = GraphWorkflow( + name="Rustworkx-Large-Scale", backend="rustworkx" + ) + for agent in agents: + workflow.add_node(agent) + + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + assert len(workflow._sorted_layers) == 10 + + result = workflow.run("Test large scale workflow") + assert result is not None + assert len(result) == 10 + + +def test_graph_workflow_rustworkx_agent_objects(): + """Test rustworkx with Agent objects directly in edges""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow = GraphWorkflow( + name="Rustworkx-Agent-Objects", backend="rustworkx" + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edges_from_source(agent1, [agent2, agent3]) + workflow.add_edges_to_target([agent2, agent3], agent1) + + workflow.compile() + assert len(workflow.edges) == 4 + + result = workflow.run("Test agent objects in edges") + assert result is not None + + +def test_graph_workflow_backend_fallback(): + """Test backend fallback when rustworkx unavailable""" + workflow = GraphWorkflow( + name="Fallback-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Test agent") + workflow.add_node(agent) + + if not RUSTWORKX_AVAILABLE: + assert ( + workflow.graph_backend.__class__.__name__ + == "NetworkXBackend" + ) + else: + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + + +if __name__ == 
"__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_graph_workflow_comprehensive.py b/tests/structs/test_graph_workflow_comprehensive.py deleted file mode 100644 index 5cb6a4a6..00000000 --- a/tests/structs/test_graph_workflow_comprehensive.py +++ /dev/null @@ -1,225 +0,0 @@ -import pytest -from swarms.structs.graph_workflow import ( - GraphWorkflow, - Node, - NodeType, -) -from swarms.structs.agent import Agent - - -def create_test_agent(name: str, description: str = None) -> Agent: - """Create a real agent for testing""" - if description is None: - description = f"Test agent for {name} operations" - - return Agent( - agent_name=name, - agent_description=description, - model_name="gpt-4o-mini", - verbose=False, - print_on=False, - max_loops=1, - ) - - -def test_graph_workflow_basic_node_creation(): - """Test basic GraphWorkflow node creation with real agents""" - # Test basic node creation - agent = create_test_agent( - "TestAgent", "Test agent for node creation" - ) - node = Node.from_agent(agent) - assert node.id == "TestAgent" - assert node.type == NodeType.AGENT - assert node.agent == agent - - # Test node with custom id - node2 = Node(id="CustomID", type=NodeType.AGENT, agent=agent) - assert node2.id == "CustomID" - - -def test_graph_workflow_multi_agent_collaboration(): - """Test GraphWorkflow with multiple agents in a collaboration scenario""" - # Create specialized agents for a business analysis workflow - market_researcher = create_test_agent( - "Market-Researcher", - "Specialist in market analysis and trend identification", - ) - - data_analyst = create_test_agent( - "Data-Analyst", - "Expert in data processing and statistical analysis", - ) - - strategy_consultant = create_test_agent( - "Strategy-Consultant", - "Senior consultant for strategic planning and recommendations", - ) - - # Create workflow with linear execution path - workflow = GraphWorkflow(name="Business-Analysis-Workflow") - workflow.add_node(market_researcher) - 
workflow.add_node(data_analyst) - workflow.add_node(strategy_consultant) - - # Add edges to define execution order - workflow.add_edge("Market-Researcher", "Data-Analyst") - workflow.add_edge("Data-Analyst", "Strategy-Consultant") - - # Test workflow execution - result = workflow.run( - "Analyze market opportunities for AI in healthcare" - ) - assert result is not None - - -def test_graph_workflow_parallel_execution(): - """Test GraphWorkflow with parallel execution paths""" - # Create agents for parallel analysis - technical_analyst = create_test_agent( - "Technical-Analyst", - "Technical feasibility and implementation analysis", - ) - - market_analyst = create_test_agent( - "Market-Analyst", - "Market positioning and competitive analysis", - ) - - financial_analyst = create_test_agent( - "Financial-Analyst", "Financial modeling and ROI analysis" - ) - - risk_assessor = create_test_agent( - "Risk-Assessor", "Risk assessment and mitigation planning" - ) - - # Create workflow with parallel execution - workflow = GraphWorkflow(name="Parallel-Analysis-Workflow") - workflow.add_node(technical_analyst) - workflow.add_node(market_analyst) - workflow.add_node(financial_analyst) - workflow.add_node(risk_assessor) - - # Add edges for fan-out execution (one to many) - workflow.add_edges_from_source( - "Technical-Analyst", - ["Market-Analyst", "Financial-Analyst", "Risk-Assessor"], - ) - - # Test parallel execution - result = workflow.run( - "Evaluate feasibility of launching a new fintech platform" - ) - assert result is not None - - -def test_graph_workflow_complex_topology(): - """Test GraphWorkflow with complex node topology""" - # Create agents for a comprehensive product development workflow - product_manager = create_test_agent( - "Product-Manager", "Product strategy and roadmap management" - ) - - ux_designer = create_test_agent( - "UX-Designer", "User experience design and research" - ) - - backend_developer = create_test_agent( - "Backend-Developer", - "Backend 
system architecture and development", - ) - - frontend_developer = create_test_agent( - "Frontend-Developer", - "Frontend interface and user interaction development", - ) - - qa_engineer = create_test_agent( - "QA-Engineer", "Quality assurance and testing specialist" - ) - - devops_engineer = create_test_agent( - "DevOps-Engineer", "Deployment and infrastructure management" - ) - - # Create workflow with complex dependencies - workflow = GraphWorkflow(name="Product-Development-Workflow") - workflow.add_node(product_manager) - workflow.add_node(ux_designer) - workflow.add_node(backend_developer) - workflow.add_node(frontend_developer) - workflow.add_node(qa_engineer) - workflow.add_node(devops_engineer) - - # Define complex execution topology - workflow.add_edge("Product-Manager", "UX-Designer") - workflow.add_edge("UX-Designer", "Frontend-Developer") - workflow.add_edge("Product-Manager", "Backend-Developer") - workflow.add_edge("Backend-Developer", "QA-Engineer") - workflow.add_edge("Frontend-Developer", "QA-Engineer") - workflow.add_edge("QA-Engineer", "DevOps-Engineer") - - # Test complex workflow execution - result = workflow.run( - "Develop a comprehensive e-commerce platform with AI recommendations" - ) - assert result is not None - - -def test_graph_workflow_error_handling(): - """Test GraphWorkflow error handling and validation""" - # Test with empty workflow - workflow = GraphWorkflow() - result = workflow.run("Test task") - # Empty workflow should handle gracefully - assert result is not None - - # Test workflow compilation and caching - researcher = create_test_agent( - "Researcher", "Research specialist" - ) - workflow.add_node(researcher) - - # First run should compile - result1 = workflow.run("Research task") - assert result1 is not None - - # Second run should use cached compilation - result2 = workflow.run("Another research task") - assert result2 is not None - - -def test_graph_workflow_node_metadata(): - """Test GraphWorkflow with node metadata""" 
- # Create agents with different priorities and requirements - high_priority_agent = create_test_agent( - "High-Priority-Analyst", "High priority analysis specialist" - ) - - standard_agent = create_test_agent( - "Standard-Analyst", "Standard analysis agent" - ) - - # Create workflow and add nodes with metadata - workflow = GraphWorkflow(name="Metadata-Workflow") - workflow.add_node( - high_priority_agent, - metadata={"priority": "high", "timeout": 60}, - ) - workflow.add_node( - standard_agent, metadata={"priority": "normal", "timeout": 30} - ) - - # Add execution dependency - workflow.add_edge("High-Priority-Analyst", "Standard-Analyst") - - # Test execution with metadata - result = workflow.run( - "Analyze business requirements with different priorities" - ) - assert result is not None - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_multi_agent_debate.py b/tests/structs/test_multi_agent_debate.py index 12737b3b..806a94c6 100644 --- a/tests/structs/test_multi_agent_debate.py +++ b/tests/structs/test_multi_agent_debate.py @@ -18,8 +18,10 @@ from swarms.structs.agent import Agent def create_function_agent(name: str, system_prompt: str = None): if system_prompt is None: - system_prompt = f"You are {name}. Provide concise and direct responses." - + system_prompt = ( + f"You are {name}. Provide concise and direct responses." + ) + agent = Agent( agent_name=name, agent_description=f"Test agent {name}", @@ -34,12 +36,10 @@ def create_function_agent(name: str, system_prompt: str = None): @pytest.fixture def sample_two_agents(): agent1 = create_function_agent( - "Agent1", - "You are Agent1. Provide concise responses." + "Agent1", "You are Agent1. Provide concise responses." ) agent2 = create_function_agent( - "Agent2", - "You are Agent2. Provide concise responses." + "Agent2", "You are Agent2. Provide concise responses." 
) return [agent1, agent2] @@ -71,7 +71,9 @@ def test_one_on_one_debate_initialization(sample_two_agents): assert debate.output_type == "str-all-except-first" logger.info("OneOnOneDebate initialization test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate initialization: {e}") + logger.error( + f"Failed to test OneOnOneDebate initialization: {e}" + ) raise @@ -95,7 +97,9 @@ def test_one_on_one_debate_run(sample_two_agents, sample_task): raise -def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_task): +def test_one_on_one_debate_wrong_number_of_agents( + sample_three_agents, sample_task +): try: debate = OneOnOneDebate( max_loops=2, @@ -104,13 +108,19 @@ def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_ta ) with pytest.raises(ValueError, match="exactly two agents"): debate.run(sample_task) - logger.info("OneOnOneDebate wrong number of agents test passed") + logger.info( + "OneOnOneDebate wrong number of agents test passed" + ) except Exception as e: - logger.error(f"Failed to test OneOnOneDebate wrong number of agents: {e}") + logger.error( + f"Failed to test OneOnOneDebate wrong number of agents: {e}" + ) raise -def test_one_on_one_debate_output_types(sample_two_agents, sample_task): +def test_one_on_one_debate_output_types( + sample_two_agents, sample_task +): try: assert sample_two_agents is not None assert sample_task is not None @@ -133,7 +143,9 @@ def test_one_on_one_debate_output_types(sample_two_agents, sample_task): assert isinstance(result, str) logger.info("OneOnOneDebate output types test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate output types: {e}") + logger.error( + f"Failed to test OneOnOneDebate output types: {e}" + ) raise @@ -175,13 +187,19 @@ def test_expert_panel_discussion_initialization(sample_three_agents): assert panel.max_rounds == 2 assert len(panel.agents) == 3 assert panel.moderator is not None - 
logger.info("ExpertPanelDiscussion initialization test passed") + logger.info( + "ExpertPanelDiscussion initialization test passed" + ) except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion initialization: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion initialization: {e}" + ) raise -def test_expert_panel_discussion_run(sample_three_agents, sample_task): +def test_expert_panel_discussion_run( + sample_three_agents, sample_task +): try: moderator = create_function_agent("Moderator") assert moderator is not None @@ -217,15 +235,23 @@ def test_expert_panel_discussion_insufficient_agents(sample_task): output_type="str-all-except-first", ) assert panel is not None - with pytest.raises(ValueError, match="At least two expert agents"): + with pytest.raises( + ValueError, match="At least two expert agents" + ): panel.run(sample_task) - logger.info("ExpertPanelDiscussion insufficient agents test passed") + logger.info( + "ExpertPanelDiscussion insufficient agents test passed" + ) except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion insufficient agents: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion insufficient agents: {e}" + ) raise -def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): +def test_expert_panel_discussion_no_moderator( + sample_three_agents, sample_task +): try: panel = ExpertPanelDiscussion( max_rounds=2, @@ -233,11 +259,15 @@ def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): moderator=None, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="moderator agent is required"): + with pytest.raises( + ValueError, match="moderator agent is required" + ): panel.run(sample_task) logger.info("ExpertPanelDiscussion no moderator test passed") except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion no moderator: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion no moderator: 
{e}" + ) raise @@ -257,7 +287,9 @@ def test_round_table_discussion_initialization(sample_three_agents): assert round_table.facilitator is not None logger.info("RoundTableDiscussion initialization test passed") except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion initialization: {e}") + logger.error( + f"Failed to test RoundTableDiscussion initialization: {e}" + ) raise @@ -292,15 +324,23 @@ def test_round_table_discussion_insufficient_agents(sample_task): facilitator=facilitator, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two participants"): + with pytest.raises( + ValueError, match="At least two participants" + ): round_table.run(sample_task) - logger.info("RoundTableDiscussion insufficient agents test passed") + logger.info( + "RoundTableDiscussion insufficient agents test passed" + ) except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion insufficient agents: {e}") + logger.error( + f"Failed to test RoundTableDiscussion insufficient agents: {e}" + ) raise -def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task): +def test_round_table_discussion_no_facilitator( + sample_three_agents, sample_task +): try: round_table = RoundTableDiscussion( max_cycles=2, @@ -308,11 +348,15 @@ def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task) facilitator=None, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="facilitator agent is required"): + with pytest.raises( + ValueError, match="facilitator agent is required" + ): round_table.run(sample_task) logger.info("RoundTableDiscussion no facilitator test passed") except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion no facilitator: {e}") + logger.error( + f"Failed to test RoundTableDiscussion no facilitator: {e}" + ) raise @@ -338,7 +382,9 @@ def test_interview_series_initialization(): assert interview.follow_up_depth == 1 
logger.info("InterviewSeries initialization test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries initialization: {e}") + logger.error( + f"Failed to test InterviewSeries initialization: {e}" + ) raise @@ -378,11 +424,15 @@ def test_interview_series_no_interviewer(sample_task): follow_up_depth=1, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both interviewer and interviewee"): + with pytest.raises( + ValueError, match="Both interviewer and interviewee" + ): interview.run(sample_task) logger.info("InterviewSeries no interviewer test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries no interviewer: {e}") + logger.error( + f"Failed to test InterviewSeries no interviewer: {e}" + ) raise @@ -396,11 +446,15 @@ def test_interview_series_no_interviewee(sample_task): follow_up_depth=1, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both interviewer and interviewee"): + with pytest.raises( + ValueError, match="Both interviewer and interviewee" + ): interview.run(sample_task) logger.info("InterviewSeries no interviewee test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries no interviewee: {e}") + logger.error( + f"Failed to test InterviewSeries no interviewee: {e}" + ) raise @@ -425,13 +479,18 @@ def test_interview_series_default_questions(sample_task): assert len(result) >= 0 logger.info("InterviewSeries default questions test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries default questions: {e}") + logger.error( + f"Failed to test InterviewSeries default questions: {e}" + ) raise def test_peer_review_process_initialization(): try: - reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + reviewers = [ + create_function_agent("Reviewer1"), + create_function_agent("Reviewer2"), + ] assert reviewers is not None assert len(reviewers) == 2 assert reviewers[0] is 
not None @@ -450,13 +509,18 @@ def test_peer_review_process_initialization(): assert peer_review.review_rounds == 2 logger.info("PeerReviewProcess initialization test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess initialization: {e}") + logger.error( + f"Failed to test PeerReviewProcess initialization: {e}" + ) raise def test_peer_review_process_run(sample_task): try: - reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + reviewers = [ + create_function_agent("Reviewer1"), + create_function_agent("Reviewer2"), + ] assert reviewers is not None assert len(reviewers) == 2 author = create_function_agent("Author") @@ -491,7 +555,9 @@ def test_peer_review_process_no_reviewers(sample_task): peer_review.run(sample_task) logger.info("PeerReviewProcess no reviewers test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess no reviewers: {e}") + logger.error( + f"Failed to test PeerReviewProcess no reviewers: {e}" + ) raise @@ -504,11 +570,15 @@ def test_peer_review_process_no_author(sample_task): review_rounds=2, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="author agent is required"): + with pytest.raises( + ValueError, match="author agent is required" + ): peer_review.run(sample_task) logger.info("PeerReviewProcess no author test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess no author: {e}") + logger.error( + f"Failed to test PeerReviewProcess no author: {e}" + ) raise @@ -529,7 +599,9 @@ def test_mediation_session_initialization(sample_two_agents): assert mediation.max_sessions == 2 logger.info("MediationSession initialization test passed") except Exception as e: - logger.error(f"Failed to test MediationSession initialization: {e}") + logger.error( + f"Failed to test MediationSession initialization: {e}" + ) raise @@ -567,13 +639,19 @@ def test_mediation_session_insufficient_parties(sample_task): ) with 
pytest.raises(ValueError, match="At least two parties"): mediation.run(sample_task) - logger.info("MediationSession insufficient parties test passed") + logger.info( + "MediationSession insufficient parties test passed" + ) except Exception as e: - logger.error(f"Failed to test MediationSession insufficient parties: {e}") + logger.error( + f"Failed to test MediationSession insufficient parties: {e}" + ) raise -def test_mediation_session_no_mediator(sample_two_agents, sample_task): +def test_mediation_session_no_mediator( + sample_two_agents, sample_task +): try: mediation = MediationSession( parties=sample_two_agents, @@ -581,11 +659,15 @@ def test_mediation_session_no_mediator(sample_two_agents, sample_task): max_sessions=2, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="mediator agent is required"): + with pytest.raises( + ValueError, match="mediator agent is required" + ): mediation.run(sample_task) logger.info("MediationSession no mediator test passed") except Exception as e: - logger.error(f"Failed to test MediationSession no mediator: {e}") + logger.error( + f"Failed to test MediationSession no mediator: {e}" + ) raise @@ -608,7 +690,9 @@ def test_brainstorming_session_initialization(sample_three_agents): assert brainstorming.build_on_ideas is True logger.info("BrainstormingSession initialization test passed") except Exception as e: - logger.error(f"Failed to test BrainstormingSession initialization: {e}") + logger.error( + f"Failed to test BrainstormingSession initialization: {e}" + ) raise @@ -646,15 +730,23 @@ def test_brainstorming_session_insufficient_participants(sample_task): build_on_ideas=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two participants"): + with pytest.raises( + ValueError, match="At least two participants" + ): brainstorming.run(sample_task) - logger.info("BrainstormingSession insufficient participants test passed") + logger.info( + "BrainstormingSession 
insufficient participants test passed" + ) except Exception as e: - logger.error(f"Failed to test BrainstormingSession insufficient participants: {e}") + logger.error( + f"Failed to test BrainstormingSession insufficient participants: {e}" + ) raise -def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): +def test_brainstorming_session_no_facilitator( + sample_three_agents, sample_task +): try: brainstorming = BrainstormingSession( participants=sample_three_agents, @@ -663,11 +755,15 @@ def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): build_on_ideas=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="facilitator agent is required"): + with pytest.raises( + ValueError, match="facilitator agent is required" + ): brainstorming.run(sample_task) logger.info("BrainstormingSession no facilitator test passed") except Exception as e: - logger.error(f"Failed to test BrainstormingSession no facilitator: {e}") + logger.error( + f"Failed to test BrainstormingSession no facilitator: {e}" + ) raise @@ -699,7 +795,9 @@ def test_trial_simulation_initialization(): assert trial.phases == ["opening", "closing"] logger.info("TrialSimulation initialization test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation initialization: {e}") + logger.error( + f"Failed to test TrialSimulation initialization: {e}" + ) raise @@ -746,7 +844,9 @@ def test_trial_simulation_no_prosecution(sample_task): trial.run(sample_task) logger.info("TrialSimulation no prosecution test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation no prosecution: {e}") + logger.error( + f"Failed to test TrialSimulation no prosecution: {e}" + ) raise @@ -774,7 +874,9 @@ def test_trial_simulation_default_phases(sample_task): assert len(result) >= 0 logger.info("TrialSimulation default phases test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation default 
phases: {e}") + logger.error( + f"Failed to test TrialSimulation default phases: {e}" + ) raise @@ -797,7 +899,9 @@ def test_council_meeting_initialization(sample_three_agents): assert council.require_consensus is False logger.info("CouncilMeeting initialization test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting initialization: {e}") + logger.error( + f"Failed to test CouncilMeeting initialization: {e}" + ) raise @@ -835,15 +939,21 @@ def test_council_meeting_insufficient_members(sample_task): require_consensus=False, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two council members"): + with pytest.raises( + ValueError, match="At least two council members" + ): council.run(sample_task) logger.info("CouncilMeeting insufficient members test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting insufficient members: {e}") + logger.error( + f"Failed to test CouncilMeeting insufficient members: {e}" + ) raise -def test_council_meeting_no_chairperson(sample_three_agents, sample_task): +def test_council_meeting_no_chairperson( + sample_three_agents, sample_task +): try: council = CouncilMeeting( council_members=sample_three_agents, @@ -852,11 +962,15 @@ def test_council_meeting_no_chairperson(sample_three_agents, sample_task): require_consensus=False, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="chairperson agent is required"): + with pytest.raises( + ValueError, match="chairperson agent is required" + ): council.run(sample_task) logger.info("CouncilMeeting no chairperson test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting no chairperson: {e}") + logger.error( + f"Failed to test CouncilMeeting no chairperson: {e}" + ) raise @@ -880,7 +994,9 @@ def test_mentorship_session_initialization(): assert mentorship.include_feedback is True logger.info("MentorshipSession initialization test passed") except Exception as 
e: - logger.error(f"Failed to test MentorshipSession initialization: {e}") + logger.error( + f"Failed to test MentorshipSession initialization: {e}" + ) raise @@ -918,11 +1034,15 @@ def test_mentorship_session_no_mentor(sample_task): include_feedback=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both mentor and mentee"): + with pytest.raises( + ValueError, match="Both mentor and mentee" + ): mentorship.run(sample_task) logger.info("MentorshipSession no mentor test passed") except Exception as e: - logger.error(f"Failed to test MentorshipSession no mentor: {e}") + logger.error( + f"Failed to test MentorshipSession no mentor: {e}" + ) raise @@ -936,11 +1056,15 @@ def test_mentorship_session_no_mentee(sample_task): include_feedback=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both mentor and mentee"): + with pytest.raises( + ValueError, match="Both mentor and mentee" + ): mentorship.run(sample_task) logger.info("MentorshipSession no mentee test passed") except Exception as e: - logger.error(f"Failed to test MentorshipSession no mentee: {e}") + logger.error( + f"Failed to test MentorshipSession no mentee: {e}" + ) raise @@ -963,7 +1087,9 @@ def test_negotiation_session_initialization(sample_two_agents): assert negotiation.include_concessions is True logger.info("NegotiationSession initialization test passed") except Exception as e: - logger.error(f"Failed to test NegotiationSession initialization: {e}") + logger.error( + f"Failed to test NegotiationSession initialization: {e}" + ) raise @@ -1003,13 +1129,19 @@ def test_negotiation_session_insufficient_parties(sample_task): ) with pytest.raises(ValueError, match="At least two parties"): negotiation.run(sample_task) - logger.info("NegotiationSession insufficient parties test passed") + logger.info( + "NegotiationSession insufficient parties test passed" + ) except Exception as e: - logger.error(f"Failed to test NegotiationSession insufficient 
parties: {e}") + logger.error( + f"Failed to test NegotiationSession insufficient parties: {e}" + ) raise -def test_negotiation_session_no_mediator(sample_two_agents, sample_task): +def test_negotiation_session_no_mediator( + sample_two_agents, sample_task +): try: negotiation = NegotiationSession( parties=sample_two_agents, @@ -1018,15 +1150,21 @@ def test_negotiation_session_no_mediator(sample_two_agents, sample_task): include_concessions=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="mediator agent is required"): + with pytest.raises( + ValueError, match="mediator agent is required" + ): negotiation.run(sample_task) logger.info("NegotiationSession no mediator test passed") except Exception as e: - logger.error(f"Failed to test NegotiationSession no mediator: {e}") + logger.error( + f"Failed to test NegotiationSession no mediator: {e}" + ) raise -def test_negotiation_session_without_concessions(sample_two_agents, sample_task): +def test_negotiation_session_without_concessions( + sample_two_agents, sample_task +): try: mediator = create_function_agent("Mediator") assert mediator is not None @@ -1043,13 +1181,19 @@ def test_negotiation_session_without_concessions(sample_two_agents, sample_task) assert result is not None assert isinstance(result, str) assert len(result) >= 0 - logger.info("NegotiationSession without concessions test passed") + logger.info( + "NegotiationSession without concessions test passed" + ) except Exception as e: - logger.error(f"Failed to test NegotiationSession without concessions: {e}") + logger.error( + f"Failed to test NegotiationSession without concessions: {e}" + ) raise -def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): +def test_one_on_one_debate_multiple_loops( + sample_two_agents, sample_task +): try: assert sample_two_agents is not None debate = OneOnOneDebate( @@ -1064,11 +1208,15 @@ def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): assert 
len(result) >= 0 logger.info("OneOnOneDebate multiple loops test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate multiple loops: {e}") + logger.error( + f"Failed to test OneOnOneDebate multiple loops: {e}" + ) raise -def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): +def test_expert_panel_discussion_output_types( + sample_three_agents, sample_task +): try: moderator = create_function_agent("Moderator") assert moderator is not None @@ -1093,5 +1241,7 @@ def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): assert isinstance(result, str) logger.info("ExpertPanelDiscussion output types test passed") except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion output types: {e}") - raise \ No newline at end of file + logger.error( + f"Failed to test ExpertPanelDiscussion output types: {e}" + ) + raise From 54acb0a129a9a8752e388fbcf3b2b347e4d72cb3 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 21 Nov 2025 14:43:22 -0800 Subject: [PATCH 08/16] [FEAT][rustworkx integration into GraphWorkflow] [New Examples] [Update GraphWorkflow docs] --- docs/swarms/structs/graph_workflow.md | 208 +++++++++++++++++++++++--- 1 file changed, 191 insertions(+), 17 deletions(-) diff --git a/docs/swarms/structs/graph_workflow.md b/docs/swarms/structs/graph_workflow.md index ef48d8d0..f0182be3 100644 --- a/docs/swarms/structs/graph_workflow.md +++ b/docs/swarms/structs/graph_workflow.md @@ -12,6 +12,7 @@ Key features: |------------------------|-----------------------------------------------------------------------------------------------| | **Agent-based nodes** | Each node represents an agent that can process tasks | | **Directed graph structure** | Edges define the flow of data between agents | +| **Dual backend support** | Choose between NetworkX (compatibility) or Rustworkx (performance) backends | | **Parallel execution** | Multiple agents can run simultaneously within layers | | 
**Automatic compilation** | Optimizes workflow structure for efficient execution | | **Rich visualization** | Generate visual representations using Graphviz | @@ -25,37 +26,40 @@ graph TB subgraph "GraphWorkflow Architecture" A[GraphWorkflow] --> B[Node Collection] A --> C[Edge Collection] - A --> D[NetworkX Graph] + A --> D[Graph Backend] A --> E[Execution Engine] B --> F[Agent Nodes] C --> G[Directed Edges] - D --> H[Topological Sort] - E --> I[Parallel Execution] - E --> J[Layer Processing] + D --> H[NetworkX Backend] + D --> I[Rustworkx Backend] + D --> J[Topological Sort] + E --> K[Parallel Execution] + E --> L[Layer Processing] subgraph "Node Types" - F --> K[Agent Node] - K --> L[Agent Instance] - K --> M[Node Metadata] + F --> M[Agent Node] + M --> N[Agent Instance] + M --> O[Node Metadata] end subgraph "Edge Types" - G --> N[Simple Edge] - G --> O[Fan-out Edge] - G --> P[Fan-in Edge] - G --> Q[Parallel Chain] + G --> P[Simple Edge] + G --> Q[Fan-out Edge] + G --> R[Fan-in Edge] + G --> S[Parallel Chain] end subgraph "Execution Patterns" - I --> R[Thread Pool] - I --> S[Concurrent Futures] - J --> T[Layer-by-layer] - J --> U[Dependency Resolution] + K --> T[Thread Pool] + K --> U[Concurrent Futures] + L --> V[Layer-by-layer] + L --> W[Dependency Resolution] end end ``` + ## Class Reference | Parameter | Type | Description | Default | @@ -71,6 +75,70 @@ graph TB | `task` | `Optional[str]` | The task to be executed by the workflow | `None` | | `auto_compile` | `bool` | Whether to automatically compile the workflow | `True` | | `verbose` | `bool` | Whether to enable detailed logging | `False` | +| `backend` | `str` | Graph backend to use ("networkx" or "rustworkx") | `"networkx"` | + +## Graph Backends + +GraphWorkflow supports two graph backend implementations, each with different performance characteristics: + +### NetworkX Backend (Default) + +The **NetworkX** backend is the default and most widely compatible option. 
It provides: + +| Feature | Description | +|---------------------|---------------------------------------------------------| +| āœ… Full compatibility | Works out of the box with no additional dependencies | +| āœ… Mature ecosystem | Well-tested and stable | +| āœ… Rich features | Comprehensive graph algorithms and operations | +| āœ… Python-native | Pure Python implementation | + +**Use NetworkX when:** + +- You need maximum compatibility + +- Working with small to medium-sized graphs (< 1000 nodes) + +- You want zero additional dependencies + +### Rustworkx Backend (High Performance) + +The **Rustworkx** backend provides significant performance improvements for large graphs: + +| Feature | Description | +|--------------------|-----------------------------------------------------------------| +| ⚔ High performance| Rust-based implementation for faster operations | +| ⚔ Memory efficient| Optimized for large-scale graphs | +| ⚔ Scalable | Better performance with graphs containing 1000+ nodes | +| ⚔ Same API | Drop-in replacement with identical interface | + +**Use Rustworkx when:** + +- Working with large graphs (1000+ nodes) + +- Performance is critical + +- You can install additional dependencies + +**Installation:** +```bash +pip install rustworkx +``` + +**Note:** If rustworkx is not installed and you specify `backend="rustworkx"`, GraphWorkflow will automatically fall back to NetworkX with a warning. + +### Backend Selection + +Both backends implement the same `GraphBackend` interface, ensuring complete API compatibility. You can switch between backends without changing your code: + +```python +# Use NetworkX (default) +workflow = GraphWorkflow(backend="networkx") + +# Use Rustworkx for better performance +workflow = GraphWorkflow(backend="rustworkx") +``` + +The backend choice is transparent to the rest of the API - all methods work identically regardless of which backend is used. 
### Core Methods @@ -455,7 +523,7 @@ Constructs a workflow from a list of agents and connections. | `entry_points` | `List[str]` | List of entry point node IDs | `None` | | `end_points` | `List[str]` | List of end point node IDs | `None` | | `task` | `str` | Task to be executed by the workflow | `None` | -| `**kwargs` | `Any` | Additional keyword arguments | `{}` | +| `**kwargs` | `Any` | Additional keyword arguments (e.g., `backend`, `verbose`, `auto_compile`) | `{}` | **Returns:** @@ -464,6 +532,7 @@ Constructs a workflow from a list of agents and connections. **Example:** ```python +# Using NetworkX backend (default) workflow = GraphWorkflow.from_spec( agents=[agent1, agent2, agent3], edges=[ @@ -473,10 +542,56 @@ workflow = GraphWorkflow.from_spec( ], task="Analyze market data" ) + +# Using Rustworkx backend for better performance +workflow = GraphWorkflow.from_spec( + agents=[agent1, agent2, agent3], + edges=[ + ("agent1", "agent2"), + ("agent2", "agent3"), + ], + task="Analyze market data", + backend="rustworkx" # Specify backend via kwargs +) ``` ## Examples +### Using Rustworkx Backend for Performance + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4", + max_loops=1 +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4", + max_loops=1 +) + +# Build workflow with rustworkx backend for better performance +workflow = GraphWorkflow( + name="High-Performance-Workflow", + backend="rustworkx" # Use rustworkx backend +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_edge("ResearchAgent", "AnalysisAgent") + +# Execute - backend is transparent to the API +results = workflow.run("What are the latest trends in AI?") +print(results) +``` + +**Note:** Make sure to install rustworkx first: `pip install rustworkx` + ### Basic Sequential Workflow ```python @@ -667,6 +782,46 @@ loaded_workflow = 
GraphWorkflow.load_from_file( new_results = loaded_workflow.run("Continue with quantum cryptography analysis") ``` +### Large-Scale Workflow with Rustworkx + +```python +from swarms import Agent, GraphWorkflow + +# Create a large workflow with many agents +# Rustworkx backend provides better performance for large graphs +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + backend="rustworkx", # Use rustworkx for better performance + verbose=True +) + +# Create many agents (e.g., for parallel data processing) +agents = [] +for i in range(50): + agent = Agent( + agent_name=f"Processor{i}", + model_name="gpt-4", + max_loops=1 + ) + agents.append(agent) + workflow.add_node(agent) + +# Create complex interconnections +# Rustworkx handles this efficiently +for i in range(0, 50, 10): + source_agents = [f"Processor{j}" for j in range(i, min(i+10, 50))] + target_agents = [f"Processor{j}" for j in range(i+10, min(i+20, 50))] + if target_agents: + workflow.add_parallel_chain(source_agents, target_agents) + +# Compile and execute +workflow.compile() +status = workflow.get_compilation_status() +print(f"Compiled workflow with {status['cached_layers_count']} layers") + +results = workflow.run("Process large dataset in parallel") +``` + ### Advanced Pattern Detection ```python @@ -770,7 +925,8 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest |-----------------|--------------------------------------------------------------------------------------------------| | **Scalability** | Supports workflows with hundreds of agents through efficient parallel execution | | **Flexibility** | Multiple connection patterns (sequential, fan-out, fan-in, parallel chains) | -| **Performance** | Automatic compilation and optimization for faster execution | +| **Performance** | Automatic compilation and optimization for faster execution; rustworkx backend for large-scale graphs | +| **Backend Choice** | Choose between NetworkX (compatibility) or Rustworkx 
(performance) based on your needs | | **Visualization** | Rich visual representations for workflow understanding and debugging | | **Persistence** | Complete serialization and deserialization capabilities | | **Error Handling** | Comprehensive error handling and recovery mechanisms | @@ -793,10 +949,28 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest |---------------------------------------|------------------------------------------------------------------| | **Use meaningful agent names** | Helps with debugging and visualization | | **Leverage parallel patterns** | Use fan-out and fan-in for better performance | +| **Choose the right backend** | Use rustworkx for large graphs (1000+ nodes), networkx for smaller graphs | | **Compile workflows** | Always compile before execution for optimal performance | | **Monitor execution** | Use verbose mode and status reporting for debugging | | **Save important workflows** | Use serialization for workflow persistence | | **Handle errors gracefully** | Implement proper error handling and recovery | | **Visualize complex workflows** | Use visualization to understand and debug workflows | +### Backend Performance Considerations + +When choosing between NetworkX and Rustworkx backends: + +| Graph Size | Recommended Backend | Reason | +|------------|-------------------|--------| +| < 100 nodes | NetworkX | Minimal overhead, no extra dependencies | +| 100-1000 nodes | NetworkX or Rustworkx | Both perform well, choose based on dependency preferences | +| 1000+ nodes | Rustworkx | Significant performance benefits for large graphs | +| Very large graphs (10k+ nodes) | Rustworkx | Essential for acceptable performance | + +**Performance Tips:** +- Rustworkx provides 2-10x speedup for topological operations on large graphs +- Both backends support the same features and API +- You can switch backends without code changes +- Rustworkx uses less memory for large graphs + The GraphWorkflow system represents 
a significant advancement in multi-agent orchestration, providing the tools needed to build complex, scalable, and maintainable AI workflows. \ No newline at end of file From e5c29609124288f3c9fcad24a2e29678884695b8 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Fri, 21 Nov 2025 14:56:12 -0800 Subject: [PATCH 09/16] example guide on graph workflow with agentic patterns --- docs/mkdocs.yml | 1 + .../graphworkflow_rustworkx_patterns.md | 1479 +++++++++++++++++ 2 files changed, 1480 insertions(+) create mode 100644 docs/swarms/examples/graphworkflow_rustworkx_patterns.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 1619374f..5b70d5f6 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -399,6 +399,7 @@ nav: - SwarmRouter Example: "swarms/examples/swarm_router.md" - MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md" - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md" + - Multi-Agentic Patterns with GraphWorkflow: "swarms/examples/graphworkflow_rustworkx_patterns.md" - Mixture of Agents Example: "swarms/examples/moa_example.md" - Unique Swarms: "swarms/examples/unique_swarms.md" - Agents as Tools: "swarms/examples/agents_as_tools.md" diff --git a/docs/swarms/examples/graphworkflow_rustworkx_patterns.md b/docs/swarms/examples/graphworkflow_rustworkx_patterns.md new file mode 100644 index 00000000..5d392c49 --- /dev/null +++ b/docs/swarms/examples/graphworkflow_rustworkx_patterns.md @@ -0,0 +1,1479 @@ +# GraphWorkflow with Rustworkx: Complete Patterns Guide + +A comprehensive guide to implementing various agentic patterns using GraphWorkflow with the rustworkx backend for optimal performance. + +## Table of Contents + +1. [Introduction](#introduction) +2. [Basic Patterns](#basic-patterns) +3. [Hierarchical Patterns](#hierarchical-patterns) +4. [Concurrent/Parallel Patterns](#concurrentparallel-patterns) +5. [Majority Voting Patterns](#majority-voting-patterns) +6. 
[Fan-Out/Fan-In Patterns](#fan-outfan-in-patterns) +7. [Sequential Patterns](#sequential-patterns) +8. [Advanced Patterns](#advanced-patterns) +9. [Performance Optimization](#performance-optimization) + +## Introduction + +GraphWorkflow with rustworkx backend provides a high-performance framework for orchestrating complex multi-agent workflows. This guide demonstrates how to implement various agentic patterns that are commonly used in production systems. + +### Why Rustworkx? + +- **Performance**: 2-10x faster for large graphs (1000+ nodes) +- **Memory Efficiency**: Optimized for large-scale workflows +- **Scalability**: Better performance with complex graph operations +- **API Compatibility**: Drop-in replacement for NetworkX backend + +### Installation + +```bash +pip install rustworkx +``` + +## Basic Patterns + +### Simple Sequential Workflow + +The most basic pattern - agents execute one after another in sequence. + +**Architecture Diagram:** + +```mermaid +graph LR + A[ResearchAgent] --> B[AnalysisAgent] + B --> C[SynthesisAgent] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +synthesis_agent = Agent( + agent_name="SynthesisAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Build sequential workflow +workflow = GraphWorkflow( + name="Sequential-Workflow", + backend="rustworkx", + verbose=True, +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_node(synthesis_agent) + +# Create sequential chain +workflow.add_edge(research_agent, analysis_agent) +workflow.add_edge(analysis_agent, synthesis_agent) + +# Execute +results = workflow.run("Analyze the impact of AI on healthcare") +``` + +**Use Case**: When each agent needs the previous agent's output before proceeding. 
+ +## Hierarchical Patterns + +### Multi-Level Hierarchy + +Hierarchical patterns organize agents into levels, where higher-level agents coordinate lower-level agents. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Executive] --> B[Research-Head] + A --> C[Analysis-Head] + B --> D[Researcher-1] + B --> E[Researcher-2] + C --> F[Analyst-1] + C --> G[Analyst-2] + D --> H[Synthesis-Agent] + E --> H + F --> H + G --> H +``` + +```python +from swarms import Agent, GraphWorkflow + +# Level 1: Executive/Coordinator +executive = Agent( + agent_name="Executive", + agent_description="Coordinates overall strategy", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 2: Department Heads +research_head = Agent( + agent_name="Research-Head", + agent_description="Leads research department", + model_name="gpt-4o-mini", + max_loops=1, +) + +analysis_head = Agent( + agent_name="Analysis-Head", + agent_description="Leads analysis department", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 3: Specialists +researcher_1 = Agent( + agent_name="Researcher-1", + agent_description="Market research specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +researcher_2 = Agent( + agent_name="Researcher-2", + agent_description="Technical research specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_1 = Agent( + agent_name="Analyst-1", + agent_description="Data analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + agent_description="Financial analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 4: Synthesis +synthesis_agent = Agent( + agent_name="Synthesis-Agent", + agent_description="Synthesizes all outputs", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Build hierarchical workflow +workflow = GraphWorkflow( + name="Hierarchical-Workflow", + backend="rustworkx", + verbose=True, +) + +# Add all agents +all_agents = [ + executive, + research_head, + analysis_head, + researcher_1, 
+ researcher_2, + analyst_1, + analyst_2, + synthesis_agent, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Level 1 -> Level 2 +workflow.add_edge(executive, research_head) +workflow.add_edge(executive, analysis_head) + +# Level 2 -> Level 3 +workflow.add_edges_from_source( + research_head, + [researcher_1, researcher_2], +) + +workflow.add_edges_from_source( + analysis_head, + [analyst_1, analyst_2], +) + +# Level 3 -> Level 4 (convergence) +workflow.add_edges_to_target( + [researcher_1, researcher_2, analyst_1, analyst_2], + synthesis_agent, +) + +# Execute +results = workflow.run("Conduct a comprehensive market analysis") +``` + +**Use Case**: Organizational structures, multi-level decision making, hierarchical data processing. + +### Tree Structure Hierarchy + +A tree-like hierarchy where one root agent branches into multiple specialized branches. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Root-Coordinator] --> B[Tech-Branch-Head] + A --> C[Business-Branch-Head] + B --> D[Tech-Specialist-1] + B --> E[Tech-Specialist-2] + C --> F[Business-Specialist-1] + C --> G[Business-Specialist-2] + D --> H[Final-Synthesis] + E --> H + F --> H + G --> H +``` + +```python +from swarms import Agent, GraphWorkflow + +# Root agent +root_coordinator = Agent( + agent_name="Root-Coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 1: Technical Analysis +tech_branch_head = Agent( + agent_name="Tech-Branch-Head", + model_name="gpt-4o-mini", + max_loops=1, +) + +tech_specialist_1 = Agent( + agent_name="Tech-Specialist-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +tech_specialist_2 = Agent( + agent_name="Tech-Specialist-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 2: Business Analysis +business_branch_head = Agent( + agent_name="Business-Branch-Head", + model_name="gpt-4o-mini", + max_loops=1, +) + +business_specialist_1 = Agent( + agent_name="Business-Specialist-1", + model_name="gpt-4o-mini", + max_loops=1, +) + 
+business_specialist_2 = Agent( + agent_name="Business-Specialist-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence point +final_synthesis = Agent( + agent_name="Final-Synthesis", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Tree-Hierarchy-Workflow", + backend="rustworkx", +) + +all_agents = [ + root_coordinator, + tech_branch_head, + tech_specialist_1, + tech_specialist_2, + business_branch_head, + business_specialist_1, + business_specialist_2, + final_synthesis, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Root -> Branch heads +workflow.add_edge(root_coordinator, tech_branch_head) +workflow.add_edge(root_coordinator, business_branch_head) + +# Branch heads -> Specialists +workflow.add_edges_from_source( + tech_branch_head, + [tech_specialist_1, tech_specialist_2], +) + +workflow.add_edges_from_source( + business_branch_head, + [business_specialist_1, business_specialist_2], +) + +# All specialists -> Final synthesis +workflow.add_edges_to_target( + [ + tech_specialist_1, + tech_specialist_2, + business_specialist_1, + business_specialist_2, + ], + final_synthesis, +) + +results = workflow.run("Analyze a technology startup from multiple perspectives") +``` + +## Concurrent/Parallel Patterns + +### Full Parallel Execution + +All agents execute simultaneously without dependencies. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Parallel-Agent-1] --> D[Collector] + B[Parallel-Agent-2] --> D + C[Parallel-Agent-3] --> D +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create independent parallel agents +parallel_agent_1 = Agent( + agent_name="Parallel-Agent-1", + agent_description="Independent analysis 1", + model_name="gpt-4o-mini", + max_loops=1, +) + +parallel_agent_2 = Agent( + agent_name="Parallel-Agent-2", + agent_description="Independent analysis 2", + model_name="gpt-4o-mini", + max_loops=1, +) + +parallel_agent_3 = Agent( + agent_name="Parallel-Agent-3", + agent_description="Independent analysis 3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence agent +collector = Agent( + agent_name="Collector", + agent_description="Collects all parallel results", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Full-Parallel-Workflow", + backend="rustworkx", +) + +for agent in [parallel_agent_1, parallel_agent_2, parallel_agent_3, collector]: + workflow.add_node(agent) + +# All parallel agents feed into collector +workflow.add_edges_to_target( + [parallel_agent_1, parallel_agent_2, parallel_agent_3], + collector, +) + +results = workflow.run("Analyze three different aspects of renewable energy") +``` + +**Use Case**: Independent analyses, parallel data collection, multi-perspective evaluation. + +### Layer-Based Parallel Execution + +Agents execute in layers, with all agents in a layer running in parallel. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + subgraph Layer1["Layer 1: Data Collection"] + A1[Data-Collector-1] + A2[Data-Collector-2] + A3[Data-Collector-3] + end + subgraph Layer2["Layer 2: Analysis"] + B1[Analyst-1] + B2[Analyst-2] + B3[Analyst-3] + end + subgraph Layer3["Layer 3: Synthesis"] + C[Synthesis] + end + A1 --> B1 + A1 --> B2 + A1 --> B3 + A2 --> B1 + A2 --> B2 + A2 --> B3 + A3 --> B1 + A3 --> B2 + A3 --> B3 + B1 --> C + B2 --> C + B3 --> C +``` + +```python +from swarms import Agent, GraphWorkflow + +# Layer 1: Data Collection (parallel) +data_collector_1 = Agent( + agent_name="Data-Collector-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +data_collector_3 = Agent( + agent_name="Data-Collector-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Layer 2: Analysis (parallel, depends on Layer 1) +analyst_1 = Agent( + agent_name="Analyst-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_3 = Agent( + agent_name="Analyst-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Layer 3: Synthesis (depends on Layer 2) +synthesis = Agent( + agent_name="Synthesis", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Layer-Based-Parallel-Workflow", + backend="rustworkx", +) + +all_agents = [ + data_collector_1, + data_collector_2, + data_collector_3, + analyst_1, + analyst_2, + analyst_3, + synthesis, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Layer 1 -> Layer 2: Full mesh connection +workflow.add_parallel_chain( + [data_collector_1, data_collector_2, data_collector_3], + [analyst_1, analyst_2, analyst_3], +) + +# Layer 2 -> Layer 3: Convergence +workflow.add_edges_to_target( + [analyst_1, analyst_2, analyst_3], + synthesis, +) + +results = workflow.run("Process and analyze data in 
parallel layers") +``` + +**Use Case**: Pipeline processing, multi-stage analysis, batch processing workflows. + +## Majority Voting Patterns + +### Simple Majority Vote + +Multiple agents vote on a decision, with a majority vote aggregator. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Voter-1] --> F[Vote-Aggregator] + B[Voter-2] --> F + C[Voter-3] --> F + D[Voter-4] --> F + E[Voter-5] --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Voting agents +voter_1 = Agent( + agent_name="Voter-1", + agent_description="Provides vote/opinion 1", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_2 = Agent( + agent_name="Voter-2", + agent_description="Provides vote/opinion 2", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_3 = Agent( + agent_name="Voter-3", + agent_description="Provides vote/opinion 3", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_4 = Agent( + agent_name="Voter-4", + agent_description="Provides vote/opinion 4", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_5 = Agent( + agent_name="Voter-5", + agent_description="Provides vote/opinion 5", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Vote aggregator (implements majority voting logic) +vote_aggregator = Agent( + agent_name="Vote-Aggregator", + agent_description="Aggregates votes and determines majority decision", + system_prompt="""You are a vote aggregator. Analyze all the votes/opinions provided + and determine the majority consensus. 
Provide a clear summary of the majority decision.""", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Majority-Voting-Workflow", + backend="rustworkx", +) + +all_agents = [voter_1, voter_2, voter_3, voter_4, voter_5, vote_aggregator] + +for agent in all_agents: + workflow.add_node(agent) + +# All voters -> Aggregator +workflow.add_edges_to_target( + [voter_1, voter_2, voter_3, voter_4, voter_5], + vote_aggregator, +) + +results = workflow.run( + "Should we invest in renewable energy stocks? Provide your vote and reasoning." +) +``` + +**Use Case**: Decision making, consensus building, quality assurance, validation. + +### Weighted Majority Vote + +Similar to simple majority vote but with weighted voters. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Expert-Voter-1
Weight: 2x] --> F[Weighted-Aggregator] + B[Expert-Voter-2
Weight: 2x] --> F + C[Regular-Voter-1
Weight: 1x] --> F + D[Regular-Voter-2
Weight: 1x] --> F + E[Regular-Voter-3
Weight: 1x] --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Expert voters (higher weight) +expert_voter_1 = Agent( + agent_name="Expert-Voter-1", + agent_description="Senior expert with high weight", + model_name="gpt-4o-mini", + max_loops=1, +) + +expert_voter_2 = Agent( + agent_name="Expert-Voter-2", + agent_description="Senior expert with high weight", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Regular voters (standard weight) +regular_voter_1 = Agent( + agent_name="Regular-Voter-1", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +regular_voter_2 = Agent( + agent_name="Regular-Voter-2", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +regular_voter_3 = Agent( + agent_name="Regular-Voter-3", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Weighted aggregator +weighted_aggregator = Agent( + agent_name="Weighted-Aggregator", + agent_description="Aggregates votes with expert weighting", + system_prompt="""You are a weighted vote aggregator. Expert voters (Expert-Voter-1, Expert-Voter-2) + have 2x weight compared to regular voters. Analyze all votes and determine the weighted majority decision.""", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Weighted-Majority-Voting-Workflow", + backend="rustworkx", +) + +all_agents = [ + expert_voter_1, + expert_voter_2, + regular_voter_1, + regular_voter_2, + regular_voter_3, + weighted_aggregator, +] + +for agent in all_agents: + workflow.add_node(agent) + +# All voters -> Weighted aggregator +workflow.add_edges_to_target( + [ + expert_voter_1, + expert_voter_2, + regular_voter_1, + regular_voter_2, + regular_voter_3, + ], + weighted_aggregator, +) + +results = workflow.run( + "Evaluate a business proposal. Experts should provide detailed analysis, regular voters provide standard evaluation." 
+) +``` + +## Fan-Out/Fan-In Patterns + +### Simple Fan-Out + +One source agent distributes work to multiple target agents. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Coordinator] --> B[Specialist-1] + A --> C[Specialist-2] + A --> D[Specialist-3] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Source agent +coordinator = Agent( + agent_name="Coordinator", + agent_description="Distributes tasks to specialists", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Target agents (specialists) +specialist_1 = Agent( + agent_name="Specialist-1", + agent_description="Technical specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +specialist_2 = Agent( + agent_name="Specialist-2", + agent_description="Business specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +specialist_3 = Agent( + agent_name="Specialist-3", + agent_description="Financial specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-Out-Workflow", + backend="rustworkx", +) + +for agent in [coordinator, specialist_1, specialist_2, specialist_3]: + workflow.add_node(agent) + +# Fan-out: One source to multiple targets +workflow.add_edges_from_source( + coordinator, + [specialist_1, specialist_2, specialist_3], +) + +results = workflow.run("Analyze a startup from technical, business, and financial perspectives") +``` + +**Use Case**: Task distribution, parallel specialization, workload splitting. + +### Simple Fan-In + +Multiple source agents converge to a single target agent. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Analyst-1] --> D[Synthesis] + B[Analyst-2] --> D + C[Analyst-3] --> D +``` + +```python +from swarms import Agent, GraphWorkflow + +# Source agents +analyst_1 = Agent( + agent_name="Analyst-1", + agent_description="Technical analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + agent_description="Market analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_3 = Agent( + agent_name="Analyst-3", + agent_description="Financial analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Target agent (synthesis) +synthesis = Agent( + agent_name="Synthesis", + agent_description="Synthesizes all analyses", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-In-Workflow", + backend="rustworkx", +) + +for agent in [analyst_1, analyst_2, analyst_3, synthesis]: + workflow.add_node(agent) + +# Fan-in: Multiple sources to one target +workflow.add_edges_to_target( + [analyst_1, analyst_2, analyst_3], + synthesis, +) + +results = workflow.run("Provide comprehensive analysis from multiple perspectives") +``` + +**Use Case**: Result aggregation, synthesis, convergence of parallel work. + +### Fan-Out Followed by Fan-In + +A common pattern: distribute work, then aggregate results. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Coordinator] --> B[Worker-1] + A --> C[Worker-2] + A --> D[Worker-3] + A --> E[Worker-4] + B --> F[Aggregator] + C --> F + D --> F + E --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial coordinator +coordinator = Agent( + agent_name="Coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Parallel workers +worker_1 = Agent( + agent_name="Worker-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_2 = Agent( + agent_name="Worker-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_3 = Agent( + agent_name="Worker-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_4 = Agent( + agent_name="Worker-4", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Final aggregator +aggregator = Agent( + agent_name="Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-Out-Fan-In-Workflow", + backend="rustworkx", +) + +all_agents = [ + coordinator, + worker_1, + worker_2, + worker_3, + worker_4, + aggregator, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Fan-out: Coordinator -> Workers +workflow.add_edges_from_source( + coordinator, + [worker_1, worker_2, worker_3, worker_4], +) + +# Fan-in: Workers -> Aggregator +workflow.add_edges_to_target( + [worker_1, worker_2, worker_3, worker_4], + aggregator, +) + +results = workflow.run("Distribute research tasks and synthesize results") +``` + +**Use Case**: Map-reduce patterns, parallel processing with aggregation, distributed analysis. + +## Sequential Patterns + +### Linear Chain + +Simple sequential execution where each agent depends on the previous one. 
+ +**Architecture Diagram:** + +```mermaid +graph LR + A[Agent-1] --> B[Agent-2] + B --> C[Agent-3] + C --> D[Agent-4] + D --> E[Agent-5] +``` + +```python +from swarms import Agent, GraphWorkflow + +agents = [ + Agent( + agent_name=f"Agent-{i+1}", + model_name="gpt-4o-mini", + max_loops=1, + ) + for i in range(5) +] + +workflow = GraphWorkflow( + name="Linear-Chain-Workflow", + backend="rustworkx", +) + +for agent in agents: + workflow.add_node(agent) + +# Create linear chain +for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + +results = workflow.run("Process data through a linear pipeline") +``` + +### Sequential with Branching + +Sequential flow with conditional branching. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Initial] --> B[Branch-1-Agent] + A --> C[Branch-2-Agent] + B --> D[Branch-1-Continuation] + C --> E[Branch-2-Continuation] + D --> F[Final] + E --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial agent +initial = Agent( + agent_name="Initial", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 1 +branch_1_agent = Agent( + agent_name="Branch-1-Agent", + model_name="gpt-4o-mini", + max_loops=1, +) + +branch_1_continuation = Agent( + agent_name="Branch-1-Continuation", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 2 +branch_2_agent = Agent( + agent_name="Branch-2-Agent", + model_name="gpt-4o-mini", + max_loops=1, +) + +branch_2_continuation = Agent( + agent_name="Branch-2-Continuation", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence +final = Agent( + agent_name="Final", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Sequential-Branching-Workflow", + backend="rustworkx", +) + +all_agents = [ + initial, + branch_1_agent, + branch_1_continuation, + branch_2_agent, + branch_2_continuation, + final, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Initial -> Branches +workflow.add_edge(initial, 
branch_1_agent) +workflow.add_edge(initial, branch_2_agent) + +# Branch continuations +workflow.add_edge(branch_1_agent, branch_1_continuation) +workflow.add_edge(branch_2_agent, branch_2_continuation) + +# Convergence +workflow.add_edge(branch_1_continuation, final) +workflow.add_edge(branch_2_continuation, final) + +results = workflow.run("Process through branching paths") +``` + +## Advanced Patterns + +### Pipeline with Validation + +Sequential pipeline with validation checkpoints. + +**Architecture Diagram:** + +```mermaid +graph LR + A[Data-Collector] --> B[Validator-1] + B --> C[Processor] + C --> D[Validator-2] + D --> E[Finalizer] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Pipeline stages +data_collector = Agent( + agent_name="Data-Collector", + model_name="gpt-4o-mini", + max_loops=1, +) + +validator_1 = Agent( + agent_name="Validator-1", + agent_description="Validates data quality", + model_name="gpt-4o-mini", + max_loops=1, +) + +processor = Agent( + agent_name="Processor", + model_name="gpt-4o-mini", + max_loops=1, +) + +validator_2 = Agent( + agent_name="Validator-2", + agent_description="Validates processing results", + model_name="gpt-4o-mini", + max_loops=1, +) + +finalizer = Agent( + agent_name="Finalizer", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Pipeline-With-Validation", + backend="rustworkx", +) + +for agent in [data_collector, validator_1, processor, validator_2, finalizer]: + workflow.add_node(agent) + +# Sequential pipeline with validation checkpoints +workflow.add_edge(data_collector, validator_1) +workflow.add_edge(validator_1, processor) +workflow.add_edge(processor, validator_2) +workflow.add_edge(validator_2, finalizer) + +results = workflow.run("Process data with quality checkpoints") +``` + +### Multi-Stage Review Process + +Multiple review stages before final approval. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Submitter] --> B[Reviewer-1A] + A --> C[Reviewer-1B] + B --> D[Stage-1-Aggregator] + C --> D + D --> E[Reviewer-2A] + D --> F[Reviewer-2B] + E --> G[Stage-2-Aggregator] + F --> G + G --> H[Approver] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial submission +submitter = Agent( + agent_name="Submitter", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Review stage 1 (parallel reviewers) +reviewer_1a = Agent( + agent_name="Reviewer-1A", + model_name="gpt-4o-mini", + max_loops=1, +) + +reviewer_1b = Agent( + agent_name="Reviewer-1B", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Stage 1 aggregator +stage_1_aggregator = Agent( + agent_name="Stage-1-Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Review stage 2 +reviewer_2a = Agent( + agent_name="Reviewer-2A", + model_name="gpt-4o-mini", + max_loops=1, +) + +reviewer_2b = Agent( + agent_name="Reviewer-2B", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Stage 2 aggregator +stage_2_aggregator = Agent( + agent_name="Stage-2-Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Final approver +approver = Agent( + agent_name="Approver", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Multi-Stage-Review", + backend="rustworkx", +) + +all_agents = [ + submitter, + reviewer_1a, + reviewer_1b, + stage_1_aggregator, + reviewer_2a, + reviewer_2b, + stage_2_aggregator, + approver, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Stage 1: Parallel review +workflow.add_edge(submitter, reviewer_1a) +workflow.add_edge(submitter, reviewer_1b) +workflow.add_edges_to_target([reviewer_1a, reviewer_1b], stage_1_aggregator) + +# Stage 2: Parallel review +workflow.add_edge(stage_1_aggregator, reviewer_2a) +workflow.add_edge(stage_1_aggregator, reviewer_2b) +workflow.add_edges_to_target([reviewer_2a, reviewer_2b], stage_2_aggregator) + +# Final approval 
+workflow.add_edge(stage_2_aggregator, approver) + +results = workflow.run("Review and approve a proposal through multiple stages") +``` + +### Circular/Iterative Pattern + +Agents form a cycle for iterative refinement. + +**Architecture Diagram:** + +```mermaid +graph LR + A[Agent-1] --> B[Agent-2] + B --> C[Agent-3] + C --> D[Exit-Checker] + D -.->|Iterate| A +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create iterative refinement agents +agent_1 = Agent( + agent_name="Agent-1", + agent_description="First refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +agent_2 = Agent( + agent_name="Agent-2", + agent_description="Second refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +agent_3 = Agent( + agent_name="Agent-3", + agent_description="Third refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Exit condition checker +exit_checker = Agent( + agent_name="Exit-Checker", + agent_description="Checks if refinement is complete", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Iterative-Refinement", + backend="rustworkx", + max_loops=3, # Limit iterations +) + +for agent in [agent_1, agent_2, agent_3, exit_checker]: + workflow.add_node(agent) + +# Circular refinement +workflow.add_edge(agent_1, agent_2) +workflow.add_edge(agent_2, agent_3) +workflow.add_edge(agent_3, exit_checker) +# Note: For true iteration, you'd need to add edge back to agent_1 +# This is a simplified example + +results = workflow.run("Iteratively refine a document") +``` + +### Star Pattern + +Central hub agent coordinates with multiple spoke agents. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Hub] --> B[Spoke-1] + A --> C[Spoke-2] + A --> D[Spoke-3] + A --> E[Spoke-4] + B --> A + C --> A + D --> A + E --> A +``` + +```python +from swarms import Agent, GraphWorkflow + +# Central hub +hub = Agent( + agent_name="Hub", + agent_description="Central coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Spoke agents +spoke_1 = Agent( + agent_name="Spoke-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_2 = Agent( + agent_name="Spoke-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_3 = Agent( + agent_name="Spoke-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_4 = Agent( + agent_name="Spoke-4", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Star-Pattern-Workflow", + backend="rustworkx", +) + +for agent in [hub, spoke_1, spoke_2, spoke_3, spoke_4]: + workflow.add_node(agent) + +# Hub -> Spokes (fan-out) +workflow.add_edges_from_source( + hub, + [spoke_1, spoke_2, spoke_3, spoke_4], +) + +# Spokes -> Hub (fan-in) +workflow.add_edges_to_target( + [spoke_1, spoke_2, spoke_3, spoke_4], + hub, +) + +results = workflow.run("Coordinate work through a central hub") +``` + +## Performance Optimization + +### Compilation Best Practices + +Always compile workflows before execution for optimal performance: + +```python +workflow = GraphWorkflow( + name="Optimized-Workflow", + backend="rustworkx", + auto_compile=True, # Automatic compilation +) + +# Or manually compile +workflow.compile() + +# Check compilation status +status = workflow.get_compilation_status() +print(f"Compiled: {status['is_compiled']}") +print(f"Layers: {status['cached_layers_count']}") +``` + +### Large-Scale Workflow Tips + +For workflows with 100+ agents: + +1. **Use rustworkx backend** for better performance +2. **Compile before execution** to cache topological layers +3. **Use parallel patterns** to maximize throughput +4. 
**Monitor compilation status** to ensure optimization + +```python +# Large-scale workflow example +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + backend="rustworkx", # Essential for large graphs + auto_compile=True, + verbose=True, # Monitor performance +) + +# Add many agents... +# Use parallel patterns for efficiency + +# Check performance +status = workflow.get_compilation_status() +print(f"Max workers: {status['max_workers']}") +print(f"Layers: {status['cached_layers_count']}") +``` + +### Visualization for Debugging + +Visualize workflows to understand structure and optimize: + +```python +# Generate visualization +output_file = workflow.visualize( + format="png", + show_summary=True, # Shows parallel patterns + view=True, +) + +# Or simple text visualization +workflow.visualize_simple() +``` + +## Conclusion + +GraphWorkflow with rustworkx backend provides a powerful framework for implementing complex multi-agent patterns. Key takeaways: + +1. **Choose the right pattern** for your use case +2. **Use rustworkx** for large-scale workflows (100+ nodes) +3. **Leverage parallel patterns** for performance +4. **Compile workflows** before execution +5. **Visualize** to understand and debug workflows + +For more examples, see the [rustworkx examples directory](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/graphworkflow_examples/rustworkx_examples). 
From 93b17bd64633918793d3d58562b3dfff0537f1a1 Mon Sep 17 00:00:00 2001 From: Aksh Parekh Date: Fri, 21 Nov 2025 18:46:22 -0800 Subject: [PATCH 10/16] [BUG-FIX] Test Update for build_agent -> build_llm_agent --- tests/structs/test_auto_swarms_builder.py | 35 ++++++++++++++++------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py index a1e9085a..768256e1 100644 --- a/tests/structs/test_auto_swarms_builder.py +++ b/tests/structs/test_auto_swarms_builder.py @@ -41,21 +41,27 @@ def test_initialization(): def test_agent_building(): - """Test building individual agents""" + """Test building individual agents from specs""" print_separator() print("Testing Agent Building") try: swarm = AutoSwarmBuilder() - agent = swarm.build_agent( - agent_name="TestAgent", - agent_description="A test agent", - agent_system_prompt="You are a test agent", - max_loops=1, - ) + specs = { + "agents": [ + { + "agent_name": "TestAgent", + "description": "A test agent", + "system_prompt": "You are a test agent", + "max_loops": 1, + } + ] + } + agents = swarm.create_agents_from_specs(specs) + agent = agents[0] print("āœ“ Built agent with configuration:") print(f" - Name: {agent.agent_name}") - print(f" - Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f" - Max loops: {agent.max_loops}") print("āœ“ Agent building test passed") return agent @@ -69,18 +75,25 @@ def test_agent_creation(): print_separator() print("Testing Agent Creation from Task") try: + import json + swarm = AutoSwarmBuilder( name="ResearchSwarm", description="A swarm for research tasks", ) task = "Research the latest developments in quantum computing" - agents = swarm._create_agents(task) + # create_agents returns a JSON string + agent_specs_json = swarm.create_agents(task) + # Parse JSON string to dict + agent_specs = json.loads(agent_specs_json) + # Convert specs to actual Agent 
objects + agents = swarm.create_agents_from_specs(agent_specs) print("āœ“ Created agents for research task:") for i, agent in enumerate(agents, 1): print(f" Agent {i}:") print(f" - Name: {agent.agent_name}") - print(f" - Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f"āœ“ Created {len(agents)} agents successfully") return agents except Exception as e: @@ -155,7 +168,7 @@ def test_error_handling(): # Test with invalid agent configuration print("Testing invalid agent configuration...") try: - swarm.build_agent("", "", "") + swarm.create_agents_from_specs({"agents": [{"agent_name": ""}]}) print( "āœ— Should have raised an error for empty agent configuration" ) From c2ae1ec33681a5f570cec9c9f9fbee1d4d48e657 Mon Sep 17 00:00:00 2001 From: Steve-Dusty Date: Sat, 22 Nov 2025 01:34:29 -0800 Subject: [PATCH 11/16] added ire test --- tests/structs/test_i_agent.py | 86 +++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 tests/structs/test_i_agent.py diff --git a/tests/structs/test_i_agent.py b/tests/structs/test_i_agent.py new file mode 100644 index 00000000..3edf9a8e --- /dev/null +++ b/tests/structs/test_i_agent.py @@ -0,0 +1,86 @@ +import pytest + +from swarms.agents.i_agent import IterativeReflectiveExpansion + + +def test_ire_agent_initialization(): + """Test IRE agent initialization with default parameters""" + agent = IterativeReflectiveExpansion() + + assert agent is not None + assert agent.agent_name == "General-Reasoning-Agent" + assert agent.max_iterations == 5 + assert agent.output_type == "dict" + assert agent.agent is not None + + +def test_ire_agent_custom_initialization(): + """Test IRE agent initialization with custom parameters""" + agent = IterativeReflectiveExpansion( + agent_name="Custom-IRE-Agent", + description="A custom reasoning agent", + max_iterations=3, + model_name="gpt-4o", + output_type="string", + ) + + assert agent.agent_name == "Custom-IRE-Agent" + assert 
agent.description == "A custom reasoning agent" + assert agent.max_iterations == 3 + assert agent.output_type == "string" + + +def test_ire_agent_execution(): + """Test IRE agent execution with a simple problem""" + agent = IterativeReflectiveExpansion( + agent_name="Test-IRE-Agent", + model_name="gpt-4o", + max_iterations=2, + output_type="dict", + ) + + # Test with a simple reasoning task + task = "What are three main benefits of renewable energy?" + result = agent.run(task) + + # Result should not be None + assert result is not None + # Result should be dict or string based on output_type + assert isinstance(result, (str, dict)) + + +def test_ire_agent_generate_hypotheses(): + """Test IRE agent hypothesis generation""" + agent = IterativeReflectiveExpansion( + agent_name="Hypothesis-Test-Agent", + max_iterations=1, + ) + + task = "How can we reduce carbon emissions?" + hypotheses = agent.generate_initial_hypotheses(task) + + assert hypotheses is not None + assert isinstance(hypotheses, list) + assert len(hypotheses) > 0 + + +def test_ire_agent_workflow(): + """Test complete IRE agent workflow with iterative refinement""" + agent = IterativeReflectiveExpansion( + agent_name="Workflow-Test-Agent", + description="Agent for testing complete workflow", + model_name="gpt-4o", + max_iterations=2, + output_type="dict", + ) + + # Test with a problem that requires iterative refinement + task = "Design an efficient public transportation system for a small city" + result = agent.run(task) + + # Verify the result is valid + assert result is not None + assert isinstance(result, (str, dict)) + + # Check that conversation was populated during execution + assert agent.conversation is not None From 688772e99b9ab3e34542df097c09ffb8e58e1a63 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sat, 22 Nov 2025 17:53:49 -0800 Subject: [PATCH 12/16] [FEAT][LLMCouncil][Docs][Examples] --- docs/examples/llm_council_examples.md | 106 ++++ docs/mkdocs.yml | 2 + 
docs/swarms/structs/llm_council.md | 453 +++++++++++++++++ .../llm_council_examples/README.md | 95 ++++ .../business_strategy_council.py | 32 ++ .../etf_stock_analysis_council.py | 30 ++ .../finance_analysis_council.py | 30 ++ .../legal_analysis_council.py | 32 ++ .../marketing_strategy_council.py | 29 ++ .../medical_diagnosis_council.py | 37 ++ .../medical_treatment_council.py | 31 ++ .../research_analysis_council.py | 32 ++ .../technology_assessment_council.py | 32 ++ hiearchical_swarm_example.py | 3 +- llm_council_example.py | 23 + pyproject.toml | 2 +- swarms/structs/__init__.py | 2 + swarms/structs/aop.py | 1 + swarms/structs/llm_council.py | 459 ++++++++++++++++++ 19 files changed, 1428 insertions(+), 3 deletions(-) create mode 100644 docs/examples/llm_council_examples.md create mode 100644 docs/swarms/structs/llm_council.md create mode 100644 examples/multi_agent/llm_council_examples/README.md create mode 100644 examples/multi_agent/llm_council_examples/business_strategy_council.py create mode 100644 examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/finance_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/legal_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/marketing_strategy_council.py create mode 100644 examples/multi_agent/llm_council_examples/medical_diagnosis_council.py create mode 100644 examples/multi_agent/llm_council_examples/medical_treatment_council.py create mode 100644 examples/multi_agent/llm_council_examples/research_analysis_council.py create mode 100644 examples/multi_agent/llm_council_examples/technology_assessment_council.py create mode 100644 llm_council_example.py create mode 100644 swarms/structs/llm_council.py diff --git a/docs/examples/llm_council_examples.md b/docs/examples/llm_council_examples.md new file mode 100644 index 00000000..ab607dbc --- /dev/null +++ 
b/docs/examples/llm_council_examples.md @@ -0,0 +1,106 @@ +# LLM Council Examples + +This page provides examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that: + +1. Each respond independently to queries +2. Review and rank each other's anonymized responses +3. Have a Chairman synthesize all responses into a final comprehensive answer + +## Example Files + +All LLM Council examples are located in the [`examples/multi_agent/llm_council_examples/`](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/llm_council_examples) directory. + +### Marketing & Business + +- **[marketing_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/marketing_strategy_council.py)** - Marketing strategy analysis and recommendations +- **[business_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/business_strategy_council.py)** - Comprehensive business strategy development + +### Finance & Investment + +- **[finance_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/finance_analysis_council.py)** - Financial analysis and investment recommendations +- **[etf_stock_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py)** - ETF and stock analysis with portfolio recommendations + +### Medical & Healthcare + +- **[medical_treatment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_treatment_council.py)** - Medical treatment recommendations and care plans +- **[medical_diagnosis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py)** - Diagnostic analysis based on symptoms + +### 
Technology & Research + +- **[technology_assessment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/technology_assessment_council.py)** - Technology evaluation and implementation strategy +- **[research_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/research_analysis_council.py)** - Comprehensive research analysis on complex topics + +### Legal + +- **[legal_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/legal_analysis_council.py)** - Legal implications and compliance analysis + +## Basic Usage Pattern + +All examples follow the same pattern: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Run a query +result = council.run("Your query here") + +# Access results +print(result["final_response"]) # Chairman's synthesized answer +print(result["original_responses"]) # Individual member responses +print(result["evaluations"]) # How members ranked each other +``` + +## Running Examples + +Run any example directly: + +```bash +python examples/multi_agent/llm_council_examples/marketing_strategy_council.py +python examples/multi_agent/llm_council_examples/finance_analysis_council.py +python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +``` + +## Key Features + +- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights +- **Peer Review**: Members evaluate and rank each other's responses anonymously +- **Synthesis**: Chairman combines the best elements from all responses +- **Transparency**: See both individual responses and evaluation rankings + +## Council Members + +The default council consists of: +- **GPT-5.1-Councilor**: Analytical and comprehensive +- **Gemini-3-Pro-Councilor**: Concise and well-processed +- **Claude-Sonnet-4.5-Councilor**: 
Thoughtful and balanced +- **Grok-4-Councilor**: Creative and innovative + +## Customization + +You can create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +custom_agent = Agent( + agent_name="Custom-Councilor", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-4.1", + max_loops=1, +) + +council = LLMCouncil( + council_members=[custom_agent, ...], + chairman_model="gpt-5.1", + verbose=True +) +``` + +## Documentation + +For complete API reference and detailed documentation, see the [LLM Council Reference Documentation](../swarms/structs/llm_council.md). + diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5b70d5f6..53936b07 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -281,6 +281,7 @@ nav: - MALT: "swarms/structs/malt.md" - Multi-Agent Execution Utilities: "swarms/structs/various_execution_methods.md" - Council of Judges: "swarms/structs/council_of_judges.md" + - LLM Council: "swarms/structs/llm_council.md" - Heavy Swarm: "swarms/structs/heavy_swarm.md" - Social Algorithms: "swarms/structs/social_algorithms.md" @@ -401,6 +402,7 @@ nav: - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md" - Multi-Agentic Patterns with GraphWorkflow: "swarms/examples/graphworkflow_rustworkx_patterns.md" - Mixture of Agents Example: "swarms/examples/moa_example.md" + - LLM Council Examples: "examples/llm_council_examples.md" - Unique Swarms: "swarms/examples/unique_swarms.md" - Agents as Tools: "swarms/examples/agents_as_tools.md" - Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md" diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md new file mode 100644 index 00000000..6352bcef --- /dev/null +++ b/docs/swarms/structs/llm_council.md @@ -0,0 +1,453 @@ +# LLM Council Class Documentation + +```mermaid +flowchart TD + A[User Query] --> B[LLM Council Initialization] + B --> C{Council Members Provided?} + C 
-->|No| D[Create Default Council] + C -->|Yes| E[Use Provided Members] + D --> F[Step 1: Parallel Response Generation] + E --> F + + subgraph "Default Council Members" + G1[GPT-5.1-Councilor
Analytical & Comprehensive] + G2[Gemini-3-Pro-Councilor
Concise & Structured] + G3[Claude-Sonnet-4.5-Councilor
Thoughtful & Balanced] + G4[Grok-4-Councilor
Creative & Innovative] + end + + F --> G1 + F --> G2 + F --> G3 + F --> G4 + + G1 --> H[Collect All Responses] + G2 --> H + G3 --> H + G4 --> H + + H --> I[Step 2: Anonymize Responses] + I --> J[Assign Anonymous IDs: A, B, C, D...] + + J --> K[Step 3: Parallel Evaluation] + + subgraph "Evaluation Phase" + K --> L1[Member 1 Evaluates All] + K --> L2[Member 2 Evaluates All] + K --> L3[Member 3 Evaluates All] + K --> L4[Member 4 Evaluates All] + end + + L1 --> M[Collect Evaluations & Rankings] + L2 --> M + L3 --> M + L4 --> M + + M --> N[Step 4: Chairman Synthesis] + N --> O[Chairman Agent] + O --> P[Final Synthesized Response] + + P --> Q[Return Results Dictionary] + + style A fill:#e1f5ff + style P fill:#c8e6c9 + style Q fill:#c8e6c9 + style O fill:#fff9c4 +``` + +The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own. + +## Workflow Overview + +The LLM Council follows a four-step process: + +1. **Parallel Response Generation**: All council members independently respond to the user query +2. **Anonymization**: Responses are anonymized with random IDs (A, B, C, D, etc.) to ensure objective evaluation +3. **Peer Review**: Each member evaluates and ranks all responses (including potentially their own) +4. 
**Synthesis**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer + +## Class Definition + +### LLMCouncil + +```python +class LLMCouncil: +``` + +### Attributes + +| Attribute | Type | Description | Default | +|-----------|------|-------------|---------| +| `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) | +| `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization | +| `verbose` | `bool` | Whether to print progress and intermediate results | `True` | + +## Methods + +### `__init__` + +Initializes the LLM Council with council members and a Chairman agent. + +#### Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. | +| `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. | +| `verbose` | `bool` | `True` | Whether to print progress and intermediate results. | + +#### Returns + +| Type | Description | +|------|-------------| +| `LLMCouncil` | Initialized LLM Council instance. | + +#### Description + +Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of: +- **GPT-5.1-Councilor**: Analytical and comprehensive responses +- **Gemini-3-Pro-Councilor**: Concise and well-processed responses +- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses +- **Grok-4-Councilor**: Creative and innovative responses + +The Chairman agent is automatically created with a specialized prompt for synthesizing responses. 
+ +#### Example Usage + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create council with default members +council = LLMCouncil(verbose=True) + +# Create council with custom members +from swarms import Agent +custom_members = [ + Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1), + Agent(agent_name="Expert-2", model_name="claude-3-opus", max_loops=1), +] +council = LLMCouncil( + council_members=custom_members, + chairman_model="gpt-4", + verbose=True +) +``` + +--- + +### `run` + +Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. + +#### Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `query` | `str` | Required | The user's query to process through the council. | + +#### Returns + +| Type | Description | +|------|-------------| +| `Dict` | Dictionary containing the following keys: | + +#### Return Dictionary Structure + +| Key | Type | Description | +|-----|------|-------------| +| `query` | `str` | The original user query. | +| `original_responses` | `Dict[str, str]` | Dictionary mapping council member names to their original responses. | +| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts (rankings and reasoning). | +| `final_response` | `str` | The Chairman's synthesized final answer combining all perspectives. | +| `anonymous_mapping` | `Dict[str, str]` | Mapping from anonymous IDs (A, B, C, D) to member names for reference. | + +#### Description + +Executes the complete LLM Council workflow: + +1. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` +2. **Collection Phase**: Collects all responses and maps them to member names +3. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity +4. 
**Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution` +5. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer + +The method provides verbose output by default, showing progress at each stage. + +#### Example Usage + +```python +from swarms.structs.llm_council import LLMCouncil + +council = LLMCouncil(verbose=True) + +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" + +result = council.run(query) + +# Access the final synthesized response +print(result["final_response"]) + +# Access individual member responses +for name, response in result["original_responses"].items(): + print(f"{name}: {response[:200]}...") + +# Access evaluation rankings +for evaluator, evaluation in result["evaluations"].items(): + print(f"{evaluator} evaluation:\n{evaluation[:300]}...") + +# Check anonymous mapping +print("Anonymous IDs:", result["anonymous_mapping"]) +``` + +--- + +### `_create_default_council` + +Creates default council members with specialized prompts and models. + +#### Parameters + +None (internal method). + +#### Returns + +| Type | Description | +|------|-------------| +| `List[Agent]` | List of Agent instances configured as council members. 
- **Gemini-3-Pro-Councilor** (`model_name="gemini-2.5-flash"`): Concise and structured, temperature=0.7 (NOTE: the model id differs from the councilor's "3 Pro" name — confirm this is intentional in the source)
+ +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string for synthesizing responses and evaluations into a final answer. | + +--- + +### `get_evaluation_prompt(query, responses, evaluator_name)` + +Creates evaluation prompt for council members to review and rank responses. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `query` | `str` | The original user query. | +| `responses` | `Dict[str, str]` | Dictionary mapping anonymous IDs to response texts. | +| `evaluator_name` | `str` | Name of the agent doing the evaluation. | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Formatted evaluation prompt string with instructions for ranking responses. | + +--- + +### `get_synthesis_prompt(query, original_responses, evaluations, id_to_member)` + +Creates synthesis prompt for the Chairman. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `query` | `str` | Original user query. | +| `original_responses` | `Dict[str, str]` | Dictionary mapping member names to their responses. | +| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts. | +| `id_to_member` | `Dict[str, str]` | Mapping from anonymous IDs to member names. | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Formatted synthesis prompt for the Chairman agent. 
| + +--- + +## Use Cases + +The LLM Council is ideal for scenarios requiring: + +- **Multi-perspective Analysis**: When you need diverse viewpoints on complex topics +- **Quality Assurance**: When peer review and ranking can improve response quality +- **Transparent Decision Making**: When you want to see how different models evaluate each other +- **Synthesis of Expertise**: When combining multiple specialized perspectives is valuable + +### Common Applications + +- **Medical Diagnosis**: Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations +- **Financial Analysis**: Different financial experts analyze investments and rank each other's assessments +- **Legal Analysis**: Multiple legal perspectives evaluate compliance and risk +- **Business Strategy**: Diverse strategic viewpoints are synthesized into comprehensive plans +- **Research Analysis**: Multiple research perspectives are combined for thorough analysis + +## Examples + +For comprehensive examples demonstrating various use cases, see the [LLM Council Examples](../../../examples/multi_agent/llm_council_examples/) directory: + +- **Medical**: `medical_diagnosis_council.py`, `medical_treatment_council.py` +- **Finance**: `finance_analysis_council.py`, `etf_stock_analysis_council.py` +- **Business**: `business_strategy_council.py`, `marketing_strategy_council.py` +- **Technology**: `technology_assessment_council.py`, `research_analysis_council.py` +- **Legal**: `legal_analysis_council.py` + +### Quick Start Example + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Example query +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" 
+ +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + +# Optionally print evaluations +print("\n\n" + "="*80) +print("EVALUATIONS") +print("="*80) +for name, evaluation in result["evaluations"].items(): + print(f"\n{name}:") + print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation) +``` + +## Customization + +### Creating Custom Council Members + +You can create custom council members with specialized roles: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +# Create custom councilor +custom_agent = Agent( + agent_name="Domain-Expert-Councilor", + agent_description="Specialized domain expert for specific analysis", + system_prompt=get_gpt_councilor_prompt(), # Or create custom prompt + model_name="gpt-4", + max_loops=1, + verbose=False, + temperature=0.7, +) + +# Create council with custom members +council = LLMCouncil( + council_members=[custom_agent, ...], # Add your custom agents + chairman_model="gpt-4", + verbose=True +) +``` + +### Custom Chairman Model + +You can specify a different model for the Chairman: + +```python +council = LLMCouncil( + chairman_model="claude-3-opus", # Use Claude as Chairman + verbose=True +) +``` + +## Architecture Benefits + +1. **Diversity**: Multiple models provide varied perspectives and approaches +2. **Quality Control**: Peer review ensures responses are evaluated objectively +3. **Synthesis**: Chairman combines the best elements from all responses +4. **Transparency**: Full visibility into individual responses and evaluation rankings +5. **Scalability**: Easy to add or remove council members +6. 
**Flexibility**: Supports custom agents and models + +## Performance Considerations + +- **Parallel Execution**: Both response generation and evaluation phases run in parallel for efficiency +- **Anonymization**: Responses are anonymized to prevent bias in evaluation +- **Model Selection**: Different models can be used for different roles based on their strengths +- **Verbose Mode**: Can be disabled for production use to reduce output + +## Related Documentation + +- [Multi-Agent Architectures Overview](overview.md) +- [Council of Judges](council_of_judges.md) - Similar peer review pattern +- [Agent Class Reference](agent.md) - Understanding individual agents +- [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods + diff --git a/examples/multi_agent/llm_council_examples/README.md b/examples/multi_agent/llm_council_examples/README.md new file mode 100644 index 00000000..3dd62f16 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/README.md @@ -0,0 +1,95 @@ +# LLM Council Examples + +This directory contains examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that: + +1. Each respond independently to queries +2. Review and rank each other's anonymized responses +3. 
Have a Chairman synthesize all responses into a final comprehensive answer + +## Examples + +### Marketing & Business +- **marketing_strategy_council.py** - Marketing strategy analysis and recommendations +- **business_strategy_council.py** - Comprehensive business strategy development + +### Finance & Investment +- **finance_analysis_council.py** - Financial analysis and investment recommendations +- **etf_stock_analysis_council.py** - ETF and stock analysis with portfolio recommendations + +### Medical & Healthcare +- **medical_treatment_council.py** - Medical treatment recommendations and care plans +- **medical_diagnosis_council.py** - Diagnostic analysis based on symptoms + +### Technology & Research +- **technology_assessment_council.py** - Technology evaluation and implementation strategy +- **research_analysis_council.py** - Comprehensive research analysis on complex topics + +### Legal +- **legal_analysis_council.py** - Legal implications and compliance analysis + +## Usage + +Each example follows the same pattern: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Run a query +result = council.run("Your query here") + +# Access results +print(result["final_response"]) # Chairman's synthesized answer +print(result["original_responses"]) # Individual member responses +print(result["evaluations"]) # How members ranked each other +``` + +## Running Examples + +Run any example directly: + +```bash +python examples/multi_agent/llm_council_examples/marketing_strategy_council.py +python examples/multi_agent/llm_council_examples/finance_analysis_council.py +python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +``` + +## Key Features + +- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights +- **Peer Review**: Members evaluate and rank each other's responses anonymously +- **Synthesis**: Chairman combines the best 
elements from all responses +- **Transparency**: See both individual responses and evaluation rankings + +## Council Members + +The default council consists of: +- **GPT-5.1-Councilor**: Analytical and comprehensive +- **Gemini-3-Pro-Councilor**: Concise and well-processed +- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced +- **Grok-4-Councilor**: Creative and innovative + +## Customization + +You can create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +custom_agent = Agent( + agent_name="Custom-Councilor", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-4.1", + max_loops=1, +) + +council = LLMCouncil( + council_members=[custom_agent, ...], + chairman_model="gpt-5.1", + verbose=True +) +``` + diff --git a/examples/multi_agent/llm_council_examples/business_strategy_council.py b/examples/multi_agent/llm_council_examples/business_strategy_council.py new file mode 100644 index 00000000..bacc8995 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/business_strategy_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Business Strategy Development + +This example demonstrates using the LLM Council to develop comprehensive +business strategies for new ventures. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Business strategy query +query = """ +A tech startup wants to launch an AI-powered personal finance app targeting +millennials and Gen Z. Develop a comprehensive business strategy including: +1. Market opportunity and competitive landscape analysis +2. Product positioning and unique value proposition +3. Go-to-market strategy and customer acquisition plan +4. Revenue model and pricing strategy +5. Key partnerships and distribution channels +6. Resource requirements and funding needs +7. Risk assessment and mitigation strategies +8. 
Success metrics and KPIs for first 12 months +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py new file mode 100644 index 00000000..b69ffb70 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py @@ -0,0 +1,30 @@ +""" +LLM Council Example: ETF Stock Analysis + +This example demonstrates using the LLM Council to analyze ETF holdings +and provide stock investment recommendations. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# ETF and stock analysis query +query = """ +Analyze the top energy ETFs (including nuclear, solar, gas, and renewable energy) +and provide: +1. Top 5 best-performing energy stocks across all energy sectors +2. ETF recommendations for diversified energy exposure +3. Risk-return profiles for each recommendation +4. Current market conditions affecting energy investments +5. Allocation strategy for a $100,000 portfolio +6. Key metrics to track for each investment +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/finance_analysis_council.py b/examples/multi_agent/llm_council_examples/finance_analysis_council.py new file mode 100644 index 00000000..d1f4c9a5 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/finance_analysis_council.py @@ -0,0 +1,30 @@ +""" +LLM Council Example: Financial Analysis + +This example demonstrates using the LLM Council to provide comprehensive +financial analysis and investment recommendations. 
+""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Financial analysis query +query = """ +Provide a comprehensive financial analysis for investing in emerging markets +technology ETFs. Include: +1. Risk assessment and volatility analysis +2. Historical performance trends +3. Sector composition and diversification benefits +4. Comparison with developed market tech ETFs +5. Recommended allocation percentage for a moderate risk portfolio +6. Key factors to monitor going forward +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/legal_analysis_council.py b/examples/multi_agent/llm_council_examples/legal_analysis_council.py new file mode 100644 index 00000000..01bdcdc8 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/legal_analysis_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Legal Analysis + +This example demonstrates using the LLM Council to analyze legal scenarios +and provide comprehensive legal insights. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Legal analysis query +query = """ +A startup is considering using AI-generated content for their marketing materials. +Analyze the legal implications including: +1. Intellectual property rights and ownership of AI-generated content +2. Copyright and trademark considerations +3. Liability for AI-generated content that may be inaccurate or misleading +4. Compliance with advertising regulations (FTC, FDA, etc.) +5. Data privacy implications if using customer data to train models +6. Contractual considerations with AI service providers +7. Risk mitigation strategies +8. 
Best practices for legal compliance +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py new file mode 100644 index 00000000..b033d982 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py @@ -0,0 +1,29 @@ +""" +LLM Council Example: Marketing Strategy Analysis + +This example demonstrates using the LLM Council to analyze and develop +comprehensive marketing strategies by leveraging multiple AI perspectives. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Marketing strategy query +query = """ +Analyze the marketing strategy for a new sustainable energy startup launching +a solar panel subscription service. Provide recommendations on: +1. Target audience segmentation +2. Key messaging and value propositions +3. Marketing channels and budget allocation +4. Competitive positioning +5. Launch timeline and milestones +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py new file mode 100644 index 00000000..f143945c --- /dev/null +++ b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py @@ -0,0 +1,37 @@ +""" +LLM Council Example: Medical Diagnosis Analysis + +This example demonstrates using the LLM Council to analyze symptoms +and provide diagnostic insights. 
+""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Medical diagnosis query +query = """ +A 35-year-old patient presents with: +- Persistent fatigue for 3 months +- Unexplained weight loss (15 lbs) +- Night sweats +- Intermittent low-grade fever +- Swollen lymph nodes in neck and armpits +- Recent blood work shows elevated ESR and CRP + +Provide: +1. Differential diagnosis with most likely conditions ranked +2. Additional diagnostic tests needed to confirm +3. Red flag symptoms requiring immediate attention +4. Possible causes and risk factors +5. Recommended next steps for the patient +6. When to seek emergency care +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/medical_treatment_council.py b/examples/multi_agent/llm_council_examples/medical_treatment_council.py new file mode 100644 index 00000000..cd828f1d --- /dev/null +++ b/examples/multi_agent/llm_council_examples/medical_treatment_council.py @@ -0,0 +1,31 @@ +""" +LLM Council Example: Medical Treatment Analysis + +This example demonstrates using the LLM Council to analyze medical treatments +and provide comprehensive treatment recommendations. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Medical treatment query +query = """ +A 45-year-old patient with Type 2 diabetes, hypertension, and early-stage +kidney disease needs treatment recommendations. Provide: +1. Comprehensive treatment plan addressing all conditions +2. Medication options with pros/cons for each condition +3. Lifestyle modifications and their expected impact +4. Monitoring schedule and key metrics to track +5. Potential drug interactions and contraindications +6. Expected outcomes and timeline for improvement +7. 
When to consider specialist referrals +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/research_analysis_council.py b/examples/multi_agent/llm_council_examples/research_analysis_council.py new file mode 100644 index 00000000..e276c96b --- /dev/null +++ b/examples/multi_agent/llm_council_examples/research_analysis_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Research Analysis + +This example demonstrates using the LLM Council to conduct comprehensive +research analysis on complex topics. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Research analysis query +query = """ +Conduct a comprehensive analysis of the potential impact of climate change +on global food security over the next 20 years. Include: +1. Key climate factors affecting agriculture (temperature, precipitation, extreme weather) +2. Regional vulnerabilities and impacts on major food-producing regions +3. Crop yield projections and food availability scenarios +4. Economic implications and food price volatility +5. Adaptation strategies and technological solutions +6. Policy recommendations for governments and international organizations +7. Role of innovation in agriculture (precision farming, GMOs, vertical farming) +8. 
Social and geopolitical implications of food insecurity +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/examples/multi_agent/llm_council_examples/technology_assessment_council.py b/examples/multi_agent/llm_council_examples/technology_assessment_council.py new file mode 100644 index 00000000..72c227a6 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/technology_assessment_council.py @@ -0,0 +1,32 @@ +""" +LLM Council Example: Technology Assessment + +This example demonstrates using the LLM Council to assess emerging technologies +and their business implications. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Technology assessment query +query = """ +Evaluate the business potential and implementation strategy for integrating +quantum computing capabilities into a financial services company. Consider: +1. Current state of quantum computing technology +2. Specific use cases in financial services (risk modeling, portfolio optimization, fraud detection) +3. Competitive advantages and potential ROI +4. Implementation timeline and resource requirements +5. Technical challenges and limitations +6. Risk factors and mitigation strategies +7. Partnership opportunities with quantum computing providers +8. 
Expected timeline for practical business value +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + diff --git a/hiearchical_swarm_example.py b/hiearchical_swarm_example.py index 753ebf0f..a5ed0633 100644 --- a/hiearchical_swarm_example.py +++ b/hiearchical_swarm_example.py @@ -1,5 +1,4 @@ -from swarms.structs.hiearchical_swarm import HierarchicalSwarm -from swarms.structs.agent import Agent +from swarms import Agent, HierarchicalSwarm # Create specialized agents research_agent = Agent( diff --git a/llm_council_example.py b/llm_council_example.py new file mode 100644 index 00000000..078d5360 --- /dev/null +++ b/llm_council_example.py @@ -0,0 +1,23 @@ +from swarms.structs.llm_council import LLMCouncil + +# Example usage of the LLM Council without a function: +# Create the council +council = LLMCouncil(verbose=True) + +# Example query +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) + +# Optionally print evaluations +print("\n\n" + "="*80) +print("EVALUATIONS") +print("="*80) +for name, evaluation in result["evaluations"].items(): + print(f"\n{name}:") + print(evaluation[:500] + "..." 
if len(evaluation) > 500 else evaluation) + diff --git a/pyproject.toml b/pyproject.toml index 10ad1565..dceec924 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.3" +version = "8.6.4" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index e0d3430a..6952d2b0 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -11,6 +11,7 @@ from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.conversation import Conversation from swarms.structs.council_as_judge import CouncilAsAJudge from swarms.structs.cron_job import CronJob +from swarms.structs.llm_council import LLMCouncil from swarms.structs.debate_with_judge import DebateWithJudge from swarms.structs.graph_workflow import ( Edge, @@ -161,6 +162,7 @@ __all__ = [ "get_swarms_info", "AutoSwarmBuilder", "CouncilAsAJudge", + "LLMCouncil", "batch_agent_execution", "aggregate", "find_agent_by_name", diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index e693a90c..1bc3dc52 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -679,6 +679,7 @@ class AOP: self.tool_configs: Dict[str, AgentToolConfig] = {} self.task_queues: Dict[str, TaskQueue] = {} self.transport = transport + self.mcp_server = FastMCP( name=server_name, port=port, diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py new file mode 100644 index 00000000..864ec976 --- /dev/null +++ b/swarms/structs/llm_council.py @@ -0,0 +1,459 @@ +""" +LLM Council - A Swarms implementation inspired by Andrej Karpathy's llm-council. + +This implementation creates a council of specialized LLM agents that: +1. Each agent responds to the user query independently +2. All agents review and rank each other's (anonymized) responses +3. 
A Chairman LLM synthesizes all responses and rankings into a final answer + +The council demonstrates how different models evaluate and rank each other's work, +often selecting responses from other models as superior to their own. +""" + +from typing import Dict, List, Optional +import random +from swarms import Agent +from swarms.structs.multi_agent_exec import ( + run_agents_concurrently, + batched_grid_agent_execution, +) + + +def get_gpt_councilor_prompt() -> str: + """ + Get system prompt for GPT-5.1 councilor. + + Returns: + System prompt string for GPT-5.1 councilor agent. + """ + return """You are a member of the LLM Council, representing GPT-5.1. Your role is to provide comprehensive, analytical, and thorough responses to user queries. + +Your strengths: +- Deep analytical thinking and comprehensive coverage +- Ability to break down complex topics into detailed components +- Thorough exploration of multiple perspectives +- Rich contextual understanding + +Your approach: +- Provide detailed, well-structured responses +- Include relevant context and background information +- Consider multiple angles and perspectives +- Be thorough but clear in your explanations + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on quality, depth, and clarity.""" + + +def get_gemini_councilor_prompt() -> str: + """ + Get system prompt for Gemini 3 Pro councilor. + + Returns: + System prompt string for Gemini 3 Pro councilor agent. + """ + return """You are a member of the LLM Council, representing Gemini 3 Pro. Your role is to provide concise, well-processed, and structured responses to user queries. 
+ +Your strengths: +- Clear and structured communication +- Efficient information processing +- Condensed yet comprehensive responses +- Well-organized presentation + +Your approach: +- Provide concise but complete answers +- Structure information clearly and logically +- Focus on key points without unnecessary verbosity +- Present information in an easily digestible format + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on clarity, structure, and efficiency.""" + + +def get_claude_councilor_prompt() -> str: + """ + Get system prompt for Claude Sonnet 4.5 councilor. + + Returns: + System prompt string for Claude Sonnet 4.5 councilor agent. + """ + return """You are a member of the LLM Council, representing Claude Sonnet 4.5. Your role is to provide thoughtful, balanced, and nuanced responses to user queries. + +Your strengths: +- Nuanced understanding and balanced perspectives +- Thoughtful consideration of trade-offs +- Clear reasoning and logical structure +- Ethical and responsible analysis + +Your approach: +- Provide balanced, well-reasoned responses +- Consider multiple viewpoints and implications +- Be thoughtful about potential limitations or edge cases +- Maintain clarity while showing depth of thought + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on thoughtfulness, balance, and nuanced reasoning.""" + + +def get_grok_councilor_prompt() -> str: + """ + Get system prompt for Grok-4 councilor. + + Returns: + System prompt string for Grok-4 councilor agent. + """ + return """You are a member of the LLM Council, representing Grok-4. Your role is to provide creative, innovative, and unique perspectives on user queries. 
+ +Your strengths: +- Creative problem-solving and innovative thinking +- Unique perspectives and out-of-the-box approaches +- Engaging and dynamic communication style +- Ability to connect seemingly unrelated concepts + +Your approach: +- Provide creative and innovative responses +- Offer unique perspectives and fresh insights +- Be engaging and dynamic in your communication +- Think creatively while maintaining accuracy + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on creativity, innovation, and unique insights.""" + + +def get_chairman_prompt() -> str: + """ + Get system prompt for the Chairman agent. + + Returns: + System prompt string for the Chairman agent. + """ + return """You are the Chairman of the LLM Council. Your role is to synthesize responses from all council members along with their evaluations and rankings into a final, comprehensive answer. + +Your responsibilities: +1. Review all council member responses to the user's query +2. Consider the rankings and evaluations provided by each council member +3. Synthesize the best elements from all responses +4. Create a final, comprehensive answer that incorporates the strengths of different approaches +5. Provide transparency about which perspectives influenced the final answer + +Your approach: +- Synthesize rather than simply aggregate +- Identify the strongest elements from each response +- Create a cohesive final answer that benefits from multiple perspectives +- Acknowledge the diversity of approaches taken by council members +- Provide a balanced, comprehensive response that serves the user's needs + +Remember: You have access to all original responses and all evaluations. 
Use this rich context to create the best possible final answer.""" + + +def get_evaluation_prompt(query: str, responses: Dict[str, str], evaluator_name: str) -> str: + """ + Create evaluation prompt for council members to review and rank responses. + + Args: + query: The original user query + responses: Dictionary mapping anonymous IDs to response texts + evaluator_name: Name of the agent doing the evaluation + + Returns: + Formatted evaluation prompt string + """ + responses_text = "\n\n".join([ + f"Response {response_id}:\n{response_text}" + for response_id, response_text in responses.items() + ]) + + return f"""You are evaluating responses from your fellow LLM Council members to the following query: + +QUERY: {query} + +Below are the anonymized responses from all council members (including potentially your own): + +{responses_text} + +Your task: +1. Carefully read and analyze each response +2. Evaluate the quality, accuracy, completeness, and usefulness of each response +3. Rank the responses from best to worst (1 = best, {len(responses)} = worst) +4. Provide brief reasoning for your rankings +5. Be honest and objective - you may find another model's response superior to your own + +Format your evaluation as follows: + +RANKINGS: +1. Response [ID]: [Brief reason why this is the best] +2. Response [ID]: [Brief reason] +... +{len(responses)}. Response [ID]: [Brief reason why this ranks lowest] + +ADDITIONAL OBSERVATIONS: +[Any additional insights about the responses, common themes, strengths/weaknesses, etc.] + +Remember: The goal is honest, objective evaluation. If another model's response is genuinely better, acknowledge it.""" + + +def get_synthesis_prompt( + query: str, + original_responses: Dict[str, str], + evaluations: Dict[str, str], + id_to_member: Dict[str, str] +) -> str: + """ + Create synthesis prompt for the Chairman. 
+ + Args: + query: Original user query + original_responses: Dict mapping member names to their responses + evaluations: Dict mapping evaluator names to their evaluation texts + id_to_member: Mapping from anonymous IDs to member names + + Returns: + Formatted synthesis prompt + """ + responses_section = "\n\n".join([ + f"=== {name} ===\n{response}" + for name, response in original_responses.items() + ]) + + evaluations_section = "\n\n".join([ + f"=== Evaluation by {name} ===\n{evaluation}" + for name, evaluation in evaluations.items() + ]) + + return f"""As the Chairman of the LLM Council, synthesize the following information into a final, comprehensive answer. + +ORIGINAL QUERY: +{query} + +COUNCIL MEMBER RESPONSES: +{responses_section} + +COUNCIL MEMBER EVALUATIONS AND RANKINGS: +{evaluations_section} + +ANONYMOUS ID MAPPING (for reference): +{chr(10).join([f" {aid} = {name}" for aid, name in id_to_member.items()])} + +Your task: +1. Review all council member responses +2. Consider the evaluations and rankings provided by each member +3. Identify the strongest elements from each response +4. Synthesize a final, comprehensive answer that: + - Incorporates the best insights from multiple perspectives + - Addresses the query thoroughly and accurately + - Benefits from the diversity of approaches taken + - Is clear, well-structured, and useful + +Provide your final synthesized response below. You may reference which perspectives or approaches influenced different parts of your answer.""" + + +class LLMCouncil: + """ + An LLM Council that orchestrates multiple specialized agents to collaboratively + answer queries through independent responses, peer review, and synthesis. + + The council follows this workflow: + 1. Dispatch query to all council members in parallel + 2. Collect all responses (anonymized) + 3. Have each member review and rank all responses + 4. 
Chairman synthesizes everything into final response + """ + + def __init__( + self, + council_members: Optional[List[Agent]] = None, + chairman_model: str = "gpt-5.1", + verbose: bool = True, + ): + """ + Initialize the LLM Council. + + Args: + council_members: List of Agent instances representing council members. + If None, creates default council with GPT-5.1, Gemini 3 Pro, + Claude Sonnet 4.5, and Grok-4. + chairman_model: Model name for the Chairman agent that synthesizes responses. + verbose: Whether to print progress and intermediate results. + """ + self.verbose = verbose + + # Create default council members if none provided + if council_members is None: + self.council_members = self._create_default_council() + else: + self.council_members = council_members + + # Create Chairman agent + self.chairman = Agent( + agent_name="Chairman", + agent_description="Chairman of the LLM Council, responsible for synthesizing all responses and rankings into a final answer", + system_prompt=get_chairman_prompt(), + model_name=chairman_model, + max_loops=1, + verbose=verbose, + temperature=0.7, + ) + + if self.verbose: + print(f"šŸ›ļø LLM Council initialized with {len(self.council_members)} members") + for i, member in enumerate(self.council_members, 1): + print(f" {i}. {member.agent_name} ({member.model_name})") + + def _create_default_council(self) -> List[Agent]: + """ + Create default council members with specialized prompts and models. + + Returns: + List of Agent instances configured as council members. 
+ """ + + # GPT-5.1 Agent - Analytical and comprehensive + gpt_agent = Agent( + agent_name="GPT-5.1-Councilor", + agent_description="Analytical and comprehensive AI councilor specializing in deep analysis and thorough responses", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-5.1", + max_loops=1, + verbose=False, + temperature=0.7, + ) + + # Gemini 3 Pro Agent - Concise and processed + gemini_agent = Agent( + agent_name="Gemini-3-Pro-Councilor", + agent_description="Concise and well-processed AI councilor specializing in clear, structured responses", + system_prompt=get_gemini_councilor_prompt(), + model_name="gemini-2.5-flash", # Using available Gemini model + max_loops=1, + verbose=False, + temperature=0.7, + ) + + # Claude Sonnet 4.5 Agent - Balanced and thoughtful + claude_agent = Agent( + agent_name="Claude-Sonnet-4.5-Councilor", + agent_description="Thoughtful and balanced AI councilor specializing in nuanced and well-reasoned responses", + system_prompt=get_claude_councilor_prompt(), + model_name="anthropic/claude-sonnet-4-5", # Using available Claude model + max_loops=1, + verbose=False, + temperature=0.0, + top_p=None, + ) + + # Grok-4 Agent - Creative and innovative + grok_agent = Agent( + agent_name="Grok-4-Councilor", + agent_description="Creative and innovative AI councilor specializing in unique perspectives and creative solutions", + system_prompt=get_grok_councilor_prompt(), + model_name="x-ai/grok-4", # Grok-4 via the x-ai provider + max_loops=1, + verbose=False, + temperature=0.8, + ) + + members = [gpt_agent, gemini_agent, claude_agent, grok_agent] + + return members + + def run(self, query: str) -> Dict: + """ + Execute the full LLM Council workflow. 
+ + Args: + query: The user's query to process + + Returns: + Dictionary containing: + - original_responses: Dict mapping member names to their responses + - evaluations: Dict mapping evaluator names to their rankings + - final_response: The Chairman's synthesized final answer + """ + if self.verbose: + print(f"\n{'='*80}") + print("šŸ›ļø LLM COUNCIL SESSION") + print("="*80) + print(f"\nšŸ“ Query: {query}\n") + + # Step 1: Get responses from all council members in parallel + if self.verbose: + print("šŸ“¤ Dispatching query to all council members...") + + results_dict = run_agents_concurrently( + self.council_members, + task=query, + return_agent_output_dict=True + ) + + # Map results to member names + original_responses = { + member.agent_name: response + for member, response in zip(self.council_members, + [results_dict.get(member.agent_name, "") + for member in self.council_members]) + } + + if self.verbose: + print(f"āœ… Received {len(original_responses)} responses\n") + for name, response in original_responses.items(): + print(f" {name}: {response[:100]}...") + + # Step 2: Anonymize responses for evaluation + # Create anonymous IDs (A, B, C, D, etc.) 
+ anonymous_ids = [chr(65 + i) for i in range(len(self.council_members))] + random.shuffle(anonymous_ids) # Shuffle to ensure anonymity + + anonymous_responses = { + anonymous_ids[i]: original_responses[member.agent_name] + for i, member in enumerate(self.council_members) + } + + # Create mapping from anonymous ID to member name (for later reference) + id_to_member = { + anonymous_ids[i]: member.agent_name + for i, member in enumerate(self.council_members) + } + + if self.verbose: + print("\nšŸ” Council members evaluating each other's responses...") + + # Step 3: Have each member evaluate and rank all responses concurrently + # Create evaluation tasks for each member + evaluation_tasks = [ + get_evaluation_prompt(query, anonymous_responses, member.agent_name) + for member in self.council_members + ] + + # Run evaluations concurrently using batched_grid_agent_execution + evaluation_results = batched_grid_agent_execution( + self.council_members, + evaluation_tasks + ) + + # Map results to member names + evaluations = { + member.agent_name: evaluation_results[i] + for i, member in enumerate(self.council_members) + } + + if self.verbose: + print(f"āœ… Received {len(evaluations)} evaluations\n") + + # Step 4: Chairman synthesizes everything + if self.verbose: + print("šŸ‘” Chairman synthesizing final response...\n") + + synthesis_prompt = get_synthesis_prompt( + query, original_responses, evaluations, id_to_member + ) + + final_response = self.chairman.run(task=synthesis_prompt) + + if self.verbose: + print(f"{'='*80}") + print("āœ… FINAL RESPONSE") + print(f"{'='*80}\n") + + return { + "query": query, + "original_responses": original_responses, + "evaluations": evaluations, + "final_response": final_response, + "anonymous_mapping": id_to_member, + } + From 31e304305920bdbd1028f2cd3834df2d6e5079fa Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sat, 22 Nov 2025 21:48:11 -0800 Subject: [PATCH 13/16] [LLMCouncil][Fix import issue with agent] --- llm_council_example.py | 4 
---- pyproject.toml | 2 +- swarms/structs/llm_council.py | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/llm_council_example.py b/llm_council_example.py index 078d5360..26f4bfec 100644 --- a/llm_council_example.py +++ b/llm_council_example.py @@ -1,6 +1,5 @@ from swarms.structs.llm_council import LLMCouncil -# Example usage of the LLM Council without a function: # Create the council council = LLMCouncil(verbose=True) @@ -14,9 +13,6 @@ result = council.run(query) print(result["final_response"]) # Optionally print evaluations -print("\n\n" + "="*80) -print("EVALUATIONS") -print("="*80) for name, evaluation in result["evaluations"].items(): print(f"\n{name}:") print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation) diff --git a/pyproject.toml b/pyproject.toml index dceec924..0336f41f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.4" +version = "8.6.5" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index 864ec976..b422136b 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -12,7 +12,7 @@ often selecting responses from other models as superior to their own. 
from typing import Dict, List, Optional import random -from swarms import Agent +from swarms.structs.agent import Agent from swarms.structs.multi_agent_exec import ( run_agents_concurrently, batched_grid_agent_execution, From 74f7bcd2b7f93822e6351d81db605ff86b8aaa17 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 23 Nov 2025 23:27:10 -0800 Subject: [PATCH 14/16] [Improve LLMCouncil] [Improved docs] --- docs/swarms/structs/llm_council.md | 244 ++++++++++++------ .../business_strategy_council.py | 1 - .../etf_stock_analysis_council.py | 1 - .../finance_analysis_council.py | 1 - .../legal_analysis_council.py | 1 - .../marketing_strategy_council.py | 1 - .../medical_diagnosis_council.py | 1 - .../medical_treatment_council.py | 1 - .../research_analysis_council.py | 1 - .../technology_assessment_council.py | 1 - llm_council_example.py | 7 +- swarms/agents/reasoning_agents.py | 4 +- swarms/agents/reasoning_duo.py | 4 +- swarms/structs/aop.py | 2 +- swarms/structs/llm_council.py | 215 +++++++++------ tests/structs/test_auto_swarms_builder.py | 4 +- tests/structs/test_i_agent.py | 2 - tests/structs/test_sequential_workflow.py | 1 - 18 files changed, 299 insertions(+), 193 deletions(-) diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md index 6352bcef..0f83b0d9 100644 --- a/docs/swarms/structs/llm_council.md +++ b/docs/swarms/structs/llm_council.md @@ -2,61 +2,35 @@ ```mermaid flowchart TD - A[User Query] --> B[LLM Council Initialization] - B --> C{Council Members Provided?} - C -->|No| D[Create Default Council] - C -->|Yes| E[Use Provided Members] - D --> F[Step 1: Parallel Response Generation] - E --> F + A[User Query] --> B[Council Members] - subgraph "Default Council Members" - G1[GPT-5.1-Councilor
Analytical & Comprehensive] - G2[Gemini-3-Pro-Councilor
Concise & Structured] - G3[Claude-Sonnet-4.5-Councilor
Thoughtful & Balanced] - G4[Grok-4-Councilor
Creative & Innovative] + subgraph "Council Members" + C1[GPT-5.1-Councilor] + C2[Gemini-3-Pro-Councilor] + C3[Claude-Sonnet-4.5-Councilor] + C4[Grok-4-Councilor] end - F --> G1 - F --> G2 - F --> G3 - F --> G4 + B --> C1 + B --> C2 + B --> C3 + B --> C4 - G1 --> H[Collect All Responses] - G2 --> H - G3 --> H - G4 --> H + C1 --> D[Responses] + C2 --> D + C3 --> D + C4 --> D - H --> I[Step 2: Anonymize Responses] - I --> J[Assign Anonymous IDs: A, B, C, D...] - - J --> K[Step 3: Parallel Evaluation] - - subgraph "Evaluation Phase" - K --> L1[Member 1 Evaluates All] - K --> L2[Member 2 Evaluates All] - K --> L3[Member 3 Evaluates All] - K --> L4[Member 4 Evaluates All] - end - - L1 --> M[Collect Evaluations & Rankings] - L2 --> M - L3 --> M - L4 --> M - - M --> N[Step 4: Chairman Synthesis] - N --> O[Chairman Agent] - O --> P[Final Synthesized Response] - - P --> Q[Return Results Dictionary] - - style A fill:#e1f5ff - style P fill:#c8e6c9 - style Q fill:#c8e6c9 - style O fill:#fff9c4 + D --> E[Anonymize & Evaluate] + E --> F[Chairman Synthesis] + F --> G[Final Response] + ``` The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own. +The class automatically tracks all agent messages in a `Conversation` object and formats output using `history_output_formatter`, providing flexible output formats including dictionaries, lists, strings, JSON, YAML, and more. 
+ ## Workflow Overview The LLM Council follows a four-step process: @@ -80,6 +54,8 @@ class LLMCouncil: |-----------|------|-------------|---------| | `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) | | `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization | +| `conversation` | `Conversation` | Conversation object tracking all messages throughout the workflow | Created during initialization | +| `output_type` | `HistoryOutputType` | Format for the output (e.g., "dict", "list", "string", "json", "yaml") | `"dict"` | | `verbose` | `bool` | Whether to print progress and intermediate results | `True` | ## Methods @@ -92,9 +68,13 @@ Initializes the LLM Council with council members and a Chairman agent. | Parameter | Type | Default | Description | |-----------|------|---------|-------------| +| `id` | `str` | `swarm_id()` | Unique identifier for the council instance. | +| `name` | `str` | `"LLM Council"` | Name of the council instance. | +| `description` | `str` | `"A collaborative council..."` | Description of the council's purpose. | | `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. | | `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. | | `verbose` | `bool` | `True` | Whether to print progress and intermediate results. | +| `output_type` | `HistoryOutputType` | `"dict"` | Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", "xml", "dict-all-except-first", "str-all-except-first", "dict-final", "list-final". | #### Returns @@ -105,12 +85,13 @@ Initializes the LLM Council with council members and a Chairman agent. #### Description Creates an LLM Council instance with specialized council members. 
If no members are provided, it creates a default council consisting of: + - **GPT-5.1-Councilor**: Analytical and comprehensive responses - **Gemini-3-Pro-Councilor**: Concise and well-processed responses - **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses - **Grok-4-Councilor**: Creative and innovative responses -The Chairman agent is automatically created with a specialized prompt for synthesizing responses. +The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis. #### Example Usage @@ -120,7 +101,7 @@ from swarms.structs.llm_council import LLMCouncil # Create council with default members council = LLMCouncil(verbose=True) -# Create council with custom members +# Create council with custom members and output format from swarms import Agent custom_members = [ Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1), @@ -129,7 +110,8 @@ custom_members = [ council = LLMCouncil( council_members=custom_members, chairman_model="gpt-4", - verbose=True + verbose=True, + output_type="json" # Output as JSON string ) ``` @@ -137,7 +119,7 @@ council = LLMCouncil( ### `run` -Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. +Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. All messages are tracked in the conversation object and formatted according to the `output_type` setting. #### Parameters @@ -149,54 +131,79 @@ Executes the full LLM Council workflow: parallel responses, anonymization, peer | Type | Description | |------|-------------| -| `Dict` | Dictionary containing the following keys: | +| `Union[List, Dict, str]` | Formatted output based on `output_type`. 
The output contains the conversation history with all messages tracked throughout the workflow. | -#### Return Dictionary Structure +#### Output Format -| Key | Type | Description | -|-----|------|-------------| -| `query` | `str` | The original user query. | -| `original_responses` | `Dict[str, str]` | Dictionary mapping council member names to their original responses. | -| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts (rankings and reasoning). | -| `final_response` | `str` | The Chairman's synthesized final answer combining all perspectives. | -| `anonymous_mapping` | `Dict[str, str]` | Mapping from anonymous IDs (A, B, C, D) to member names for reference. | +The return value depends on the `output_type` parameter set during initialization: + +- **`"dict"`** (default): Returns conversation as a dictionary/list of message dictionaries +- **`"list"`**: Returns conversation as a list of formatted strings (`"role: content"`) +- **`"string"`** or **`"str"`**: Returns conversation as a formatted string +- **`"final"`** or **`"last"`**: Returns only the content of the final message (Chairman's response) +- **`"json"`**: Returns conversation as a JSON string +- **`"yaml"`**: Returns conversation as a YAML string +- **`"xml"`**: Returns conversation as an XML string +- **`"dict-all-except-first"`**: Returns all messages except the first as a dictionary +- **`"str-all-except-first"`**: Returns all messages except the first as a string +- **`"dict-final"`**: Returns the final message as a dictionary +- **`"list-final"`**: Returns the final message as a list + +#### Conversation Tracking + +All messages are automatically tracked in the conversation object with the following roles: + +- **`"User"`**: The original user query +- **`"{member_name}"`**: Each council member's response (e.g., "GPT-5.1-Councilor") +- **`"{member_name}-Evaluation"`**: Each council member's evaluation (e.g., "GPT-5.1-Councilor-Evaluation") +- 
**`"Chairman"`**: The final synthesized response #### Description Executes the complete LLM Council workflow: -1. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` -2. **Collection Phase**: Collects all responses and maps them to member names -3. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity -4. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution` -5. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer +1. **User Query Tracking**: Adds the user query to the conversation as "User" role +2. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` +3. **Collection Phase**: Collects all responses, maps them to member names, and adds each to the conversation with the member's name as the role +4. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity +5. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution`, then adds evaluations to the conversation with "{member_name}-Evaluation" as the role +6. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer, which is added to the conversation as "Chairman" role +7. **Output Formatting**: Returns the conversation formatted according to the `output_type` setting using `history_output_formatter` -The method provides verbose output by default, showing progress at each stage. +The method provides verbose output by default, showing progress at each stage. All messages are tracked in the `conversation` attribute for later access or export. 
#### Example Usage ```python from swarms.structs.llm_council import LLMCouncil +# Create council with default output format (dict) council = LLMCouncil(verbose=True) query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" +# Run the council - returns formatted conversation based on output_type result = council.run(query) -# Access the final synthesized response -print(result["final_response"]) +# With default "dict" output_type, result is a list of message dictionaries +# Access conversation messages +for message in result: + print(f"{message['role']}: {message['content'][:200]}...") + +# Access the conversation object directly for more control +conversation = council.conversation +print("\nFinal message:", conversation.get_final_message_content()) -# Access individual member responses -for name, response in result["original_responses"].items(): - print(f"{name}: {response[:200]}...") +# Get conversation as string +print("\nFull conversation:") +print(conversation.get_str()) -# Access evaluation rankings -for evaluator, evaluation in result["evaluations"].items(): - print(f"{evaluator} evaluation:\n{evaluation[:300]}...") +# Example with different output types +council_json = LLMCouncil(output_type="json", verbose=False) +result_json = council_json.run(query) # Returns JSON string -# Check anonymous mapping -print("Anonymous IDs:", result["anonymous_mapping"]) +council_final = LLMCouncil(output_type="final", verbose=False) +result_final = council_final.run(query) # Returns only final response string ``` --- @@ -225,6 +232,7 @@ Internal method that creates the default council configuration with four special - **Grok-4-Councilor** (`model_name="x-ai/grok-4"`): Creative and innovative, temperature=0.8 Each agent is configured with: + - Specialized system prompts matching their role - `max_loops=1` for single-response generation - `verbose=False` to reduce noise during parallel execution @@ -367,25 +375,40 @@ For 
comprehensive examples demonstrating various use cases, see the [LLM Council ```python from swarms.structs.llm_council import LLMCouncil -# Create the council +# Create the council with default output format council = LLMCouncil(verbose=True) # Example query query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" -# Run the council +# Run the council - returns formatted conversation result = council.run(query) -# Print final response -print(result["final_response"]) +# With default "dict" output_type, result is a list of message dictionaries +# Print all messages +for message in result: + role = message['role'] + content = message['content'] + print(f"\n{role}:") + print(content[:500] + "..." if len(content) > 500 else content) -# Optionally print evaluations -print("\n\n" + "="*80) -print("EVALUATIONS") +# Access conversation object directly for more options +conversation = council.conversation + +# Get only the final response +print("\n" + "="*80) +print("FINAL RESPONSE") print("="*80) -for name, evaluation in result["evaluations"].items(): - print(f"\n{name}:") - print(evaluation[:500] + "..." 
if len(evaluation) > 500 else evaluation) +print(conversation.get_final_message_content()) + +# Get conversation as formatted string +print("\n" + "="*80) +print("FULL CONVERSATION") +print("="*80) +print(conversation.get_str()) + +# Export conversation to JSON +conversation.export() ``` ## Customization @@ -428,6 +451,50 @@ council = LLMCouncil( ) ``` +### Custom Output Format + +You can control the output format using the `output_type` parameter: + +```python +# Get output as JSON string +council = LLMCouncil(output_type="json") +result = council.run(query) # Returns JSON string + +# Get only the final response +council = LLMCouncil(output_type="final") +result = council.run(query) # Returns only final response string + +# Get as YAML +council = LLMCouncil(output_type="yaml") +result = council.run(query) # Returns YAML string + +# Get as formatted string +council = LLMCouncil(output_type="string") +result = council.run(query) # Returns formatted conversation string +``` + +### Accessing Conversation History + +The conversation object is accessible for advanced usage: + +```python +council = LLMCouncil() +council.run(query) + +# Access conversation directly +conversation = council.conversation + +# Get conversation history +history = conversation.conversation_history + +# Export to file +conversation.export() # Saves to default location + +# Get specific format +json_output = conversation.to_json() +yaml_output = conversation.return_messages_as_dictionary() +``` + ## Architecture Benefits 1. **Diversity**: Multiple models provide varied perspectives and approaches @@ -436,6 +503,8 @@ council = LLMCouncil( 4. **Transparency**: Full visibility into individual responses and evaluation rankings 5. **Scalability**: Easy to add or remove council members 6. **Flexibility**: Supports custom agents and models +7. **Conversation Tracking**: All messages are automatically tracked in a Conversation object for history and export +8. 
**Flexible Output**: Multiple output formats supported via `history_output_formatter` (dict, list, string, JSON, YAML, XML, etc.) ## Performance Considerations @@ -443,11 +512,14 @@ council = LLMCouncil( - **Anonymization**: Responses are anonymized to prevent bias in evaluation - **Model Selection**: Different models can be used for different roles based on their strengths - **Verbose Mode**: Can be disabled for production use to reduce output +- **Conversation Management**: Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files +- **Output Formatting**: Choose lightweight output formats (e.g., "final") for production to reduce memory usage ## Related Documentation - [Multi-Agent Architectures Overview](overview.md) - [Council of Judges](council_of_judges.md) - Similar peer review pattern - [Agent Class Reference](agent.md) - Understanding individual agents +- [Conversation Class Reference](conversation.md) - Understanding conversation tracking and management - [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods - +- [History Output Formatter](../../../swarms/utils/history_output_formatter.py) - Output formatting utilities diff --git a/examples/multi_agent/llm_council_examples/business_strategy_council.py b/examples/multi_agent/llm_council_examples/business_strategy_council.py index bacc8995..10b5087b 100644 --- a/examples/multi_agent/llm_council_examples/business_strategy_council.py +++ b/examples/multi_agent/llm_council_examples/business_strategy_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py index b69ffb70..7e85d851 100644 --- a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py +++ 
b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py @@ -27,4 +27,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/finance_analysis_council.py b/examples/multi_agent/llm_council_examples/finance_analysis_council.py index d1f4c9a5..f014be47 100644 --- a/examples/multi_agent/llm_council_examples/finance_analysis_council.py +++ b/examples/multi_agent/llm_council_examples/finance_analysis_council.py @@ -27,4 +27,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/legal_analysis_council.py b/examples/multi_agent/llm_council_examples/legal_analysis_council.py index 01bdcdc8..5ea3481e 100644 --- a/examples/multi_agent/llm_council_examples/legal_analysis_council.py +++ b/examples/multi_agent/llm_council_examples/legal_analysis_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py index b033d982..a799c364 100644 --- a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py +++ b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py @@ -26,4 +26,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py index f143945c..90532f38 100644 --- a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +++ b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py @@ -34,4 +34,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git 
a/examples/multi_agent/llm_council_examples/medical_treatment_council.py b/examples/multi_agent/llm_council_examples/medical_treatment_council.py index cd828f1d..6084db4c 100644 --- a/examples/multi_agent/llm_council_examples/medical_treatment_council.py +++ b/examples/multi_agent/llm_council_examples/medical_treatment_council.py @@ -28,4 +28,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/research_analysis_council.py b/examples/multi_agent/llm_council_examples/research_analysis_council.py index e276c96b..74a8585a 100644 --- a/examples/multi_agent/llm_council_examples/research_analysis_council.py +++ b/examples/multi_agent/llm_council_examples/research_analysis_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/examples/multi_agent/llm_council_examples/technology_assessment_council.py b/examples/multi_agent/llm_council_examples/technology_assessment_council.py index 72c227a6..4db4dd95 100644 --- a/examples/multi_agent/llm_council_examples/technology_assessment_council.py +++ b/examples/multi_agent/llm_council_examples/technology_assessment_council.py @@ -29,4 +29,3 @@ result = council.run(query) # Print final response print(result["final_response"]) - diff --git a/llm_council_example.py b/llm_council_example.py index 26f4bfec..1cc415d0 100644 --- a/llm_council_example.py +++ b/llm_council_example.py @@ -15,5 +15,8 @@ print(result["final_response"]) # Optionally print evaluations for name, evaluation in result["evaluations"].items(): print(f"\n{name}:") - print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation) - + print( + evaluation[:500] + "..." 
+ if len(evaluation) > 500 + else evaluation + ) diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index 749002db..e64ab828 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -88,9 +88,7 @@ class ReasoningAgentRouter: eval: bool = False, random_models_on: bool = False, majority_voting_prompt: Optional[str] = None, - reasoning_model_name: Optional[ - str - ] = "gpt-4o", + reasoning_model_name: Optional[str] = "gpt-4o", ): """ Initialize the ReasoningAgentRouter with the specified configuration. diff --git a/swarms/agents/reasoning_duo.py b/swarms/agents/reasoning_duo.py index 81fa0310..581a69e7 100644 --- a/swarms/agents/reasoning_duo.py +++ b/swarms/agents/reasoning_duo.py @@ -35,9 +35,7 @@ class ReasoningDuo: model_names: list[str] = ["gpt-4o-mini", "gpt-4.1"], system_prompt: str = "You are a helpful assistant that can answer questions and help with tasks.", output_type: OutputType = "dict-all-except-first", - reasoning_model_name: Optional[ - str - ] = "gpt-4o", + reasoning_model_name: Optional[str] = "gpt-4o", max_loops: int = 1, *args, **kwargs, diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index 1bc3dc52..141dfe62 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -679,7 +679,7 @@ class AOP: self.tool_configs: Dict[str, AgentToolConfig] = {} self.task_queues: Dict[str, TaskQueue] = {} self.transport = transport - + self.mcp_server = FastMCP( name=server_name, port=port, diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index b422136b..fa2a6ab5 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -17,12 +17,14 @@ from swarms.structs.multi_agent_exec import ( run_agents_concurrently, batched_grid_agent_execution, ) - +from swarms.utils.history_output_formatter import HistoryOutputType, history_output_formatter +from swarms.structs.conversation import Conversation +from swarms.structs.swarm_id import swarm_id 
def get_gpt_councilor_prompt() -> str: """ Get system prompt for GPT-5.1 councilor. - + Returns: System prompt string for GPT-5.1 councilor agent. """ @@ -46,7 +48,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_gemini_councilor_prompt() -> str: """ Get system prompt for Gemini 3 Pro councilor. - + Returns: System prompt string for Gemini 3 Pro councilor agent. """ @@ -70,7 +72,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_claude_councilor_prompt() -> str: """ Get system prompt for Claude Sonnet 4.5 councilor. - + Returns: System prompt string for Claude Sonnet 4.5 councilor agent. """ @@ -94,7 +96,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_grok_councilor_prompt() -> str: """ Get system prompt for Grok-4 councilor. - + Returns: System prompt string for Grok-4 councilor agent. """ @@ -118,7 +120,7 @@ Remember: You are part of a council where multiple AI models will respond to the def get_chairman_prompt() -> str: """ Get system prompt for the Chairman agent. - + Returns: System prompt string for the Chairman agent. """ @@ -141,23 +143,27 @@ Your approach: Remember: You have access to all original responses and all evaluations. Use this rich context to create the best possible final answer.""" -def get_evaluation_prompt(query: str, responses: Dict[str, str], evaluator_name: str) -> str: +def get_evaluation_prompt( + query: str, responses: Dict[str, str], evaluator_name: str +) -> str: """ Create evaluation prompt for council members to review and rank responses. 
- + Args: query: The original user query responses: Dictionary mapping anonymous IDs to response texts evaluator_name: Name of the agent doing the evaluation - + Returns: Formatted evaluation prompt string """ - responses_text = "\n\n".join([ - f"Response {response_id}:\n{response_text}" - for response_id, response_text in responses.items() - ]) - + responses_text = "\n\n".join( + [ + f"Response {response_id}:\n{response_text}" + for response_id, response_text in responses.items() + ] + ) + return f"""You are evaluating responses from your fellow LLM Council members to the following query: QUERY: {query} @@ -191,30 +197,34 @@ def get_synthesis_prompt( query: str, original_responses: Dict[str, str], evaluations: Dict[str, str], - id_to_member: Dict[str, str] + id_to_member: Dict[str, str], ) -> str: """ Create synthesis prompt for the Chairman. - + Args: query: Original user query original_responses: Dict mapping member names to their responses evaluations: Dict mapping evaluator names to their evaluation texts id_to_member: Mapping from anonymous IDs to member names - + Returns: Formatted synthesis prompt """ - responses_section = "\n\n".join([ - f"=== {name} ===\n{response}" - for name, response in original_responses.items() - ]) - - evaluations_section = "\n\n".join([ - f"=== Evaluation by {name} ===\n{evaluation}" - for name, evaluation in evaluations.items() - ]) - + responses_section = "\n\n".join( + [ + f"=== {name} ===\n{response}" + for name, response in original_responses.items() + ] + ) + + evaluations_section = "\n\n".join( + [ + f"=== Evaluation by {name} ===\n{evaluation}" + for name, evaluation in evaluations.items() + ] + ) + return f"""As the Chairman of the LLM Council, synthesize the following information into a final, comprehensive answer. 
ORIGINAL QUERY: @@ -246,38 +256,46 @@ class LLMCouncil: """ An LLM Council that orchestrates multiple specialized agents to collaboratively answer queries through independent responses, peer review, and synthesis. - + The council follows this workflow: 1. Dispatch query to all council members in parallel 2. Collect all responses (anonymized) 3. Have each member review and rank all responses 4. Chairman synthesizes everything into final response """ - + def __init__( self, + id: str = swarm_id(), + name: str = "LLM Council", + description: str = "A collaborative council of LLM agents where each member independently answers a query, reviews and ranks anonymized peer responses, and a chairman synthesizes the best elements into a final answer.", council_members: Optional[List[Agent]] = None, chairman_model: str = "gpt-5.1", verbose: bool = True, + output_type: HistoryOutputType = "dict", ): """ Initialize the LLM Council. - + Args: council_members: List of Agent instances representing council members. If None, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. chairman_model: Model name for the Chairman agent that synthesizes responses. verbose: Whether to print progress and intermediate results. + output_type: Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", etc. 
""" + self.name = name + self.description = description self.verbose = verbose - + self.output_type = output_type + # Create default council members if none provided if council_members is None: self.council_members = self._create_default_council() else: self.council_members = council_members - + # Create Chairman agent self.chairman = Agent( agent_name="Chairman", @@ -289,19 +307,25 @@ class LLMCouncil: temperature=0.7, ) + self.conversation = Conversation(name=f"[LLM Council] [Conversation][{name}]") + if self.verbose: - print(f"šŸ›ļø LLM Council initialized with {len(self.council_members)} members") + print( + f"šŸ›ļø LLM Council initialized with {len(self.council_members)} members" + ) for i, member in enumerate(self.council_members, 1): - print(f" {i}. {member.agent_name} ({member.model_name})") - + print( + f" {i}. {member.agent_name} ({member.model_name})" + ) + def _create_default_council(self) -> List[Agent]: """ Create default council members with specialized prompts and models. - + Returns: List of Agent instances configured as council members. """ - + # GPT-5.1 Agent - Analytical and comprehensive gpt_agent = Agent( agent_name="GPT-5.1-Councilor", @@ -312,7 +336,7 @@ class LLMCouncil: verbose=False, temperature=0.7, ) - + # Gemini 3 Pro Agent - Concise and processed gemini_agent = Agent( agent_name="Gemini-3-Pro-Councilor", @@ -323,7 +347,7 @@ class LLMCouncil: verbose=False, temperature=0.7, ) - + # Claude Sonnet 4.5 Agent - Balanced and thoughtful claude_agent = Agent( agent_name="Claude-Sonnet-4.5-Councilor", @@ -335,7 +359,7 @@ class LLMCouncil: temperature=0.0, top_p=None, ) - + # Grok-4 Agent - Creative and innovative grok_agent = Agent( agent_name="Grok-4-Councilor", @@ -346,114 +370,135 @@ class LLMCouncil: verbose=False, temperature=0.8, ) - + members = [gpt_agent, gemini_agent, claude_agent, grok_agent] - + return members - - def run(self, query: str) -> Dict: + + def run(self, query: str): """ Execute the full LLM Council workflow. 
- + Args: query: The user's query to process - + Returns: - Dictionary containing: - - original_responses: Dict mapping member names to their responses - - evaluations: Dict mapping evaluator names to their rankings - - final_response: The Chairman's synthesized final answer + Formatted output based on output_type, containing conversation history + with all council member responses, evaluations, and final synthesis. """ if self.verbose: print(f"\n{'='*80}") print("šŸ›ļø LLM COUNCIL SESSION") - print("="*80) + print("=" * 80) print(f"\nšŸ“ Query: {query}\n") - + + # Add user query to conversation + self.conversation.add(role="User", content=query) + # Step 1: Get responses from all council members in parallel if self.verbose: print("šŸ“¤ Dispatching query to all council members...") - + results_dict = run_agents_concurrently( self.council_members, task=query, - return_agent_output_dict=True + return_agent_output_dict=True, ) - + # Map results to member names original_responses = { member.agent_name: response - for member, response in zip(self.council_members, - [results_dict.get(member.agent_name, "") - for member in self.council_members]) + for member, response in zip( + self.council_members, + [ + results_dict.get(member.agent_name, "") + for member in self.council_members + ], + ) } - + + # Add each council member's response to conversation + for member_name, response in original_responses.items(): + self.conversation.add(role=member_name, content=response) + if self.verbose: - print(f"āœ… Received {len(original_responses)} responses\n") + print( + f"āœ… Received {len(original_responses)} responses\n" + ) for name, response in original_responses.items(): print(f" {name}: {response[:100]}...") - + # Step 2: Anonymize responses for evaluation # Create anonymous IDs (A, B, C, D, etc.) 
- anonymous_ids = [chr(65 + i) for i in range(len(self.council_members))] + anonymous_ids = [ + chr(65 + i) for i in range(len(self.council_members)) + ] random.shuffle(anonymous_ids) # Shuffle to ensure anonymity - + anonymous_responses = { anonymous_ids[i]: original_responses[member.agent_name] for i, member in enumerate(self.council_members) } - + # Create mapping from anonymous ID to member name (for later reference) id_to_member = { anonymous_ids[i]: member.agent_name for i, member in enumerate(self.council_members) } - + if self.verbose: - print("\nšŸ” Council members evaluating each other's responses...") - + print( + "\nšŸ” Council members evaluating each other's responses..." + ) + # Step 3: Have each member evaluate and rank all responses concurrently # Create evaluation tasks for each member evaluation_tasks = [ - get_evaluation_prompt(query, anonymous_responses, member.agent_name) + get_evaluation_prompt( + query, anonymous_responses, member.agent_name + ) for member in self.council_members ] - + # Run evaluations concurrently using batched_grid_agent_execution evaluation_results = batched_grid_agent_execution( - self.council_members, - evaluation_tasks + self.council_members, evaluation_tasks ) - + # Map results to member names evaluations = { member.agent_name: evaluation_results[i] for i, member in enumerate(self.council_members) } - + + # Add each council member's evaluation to conversation + for member_name, evaluation in evaluations.items(): + self.conversation.add( + role=f"{member_name}-Evaluation", content=evaluation + ) + if self.verbose: print(f"āœ… Received {len(evaluations)} evaluations\n") - + # Step 4: Chairman synthesizes everything if self.verbose: print("šŸ‘” Chairman synthesizing final response...\n") - + synthesis_prompt = get_synthesis_prompt( query, original_responses, evaluations, id_to_member ) - + final_response = self.chairman.run(task=synthesis_prompt) - + + # Add chairman's final response to conversation + 
self.conversation.add(role="Chairman", content=final_response) + if self.verbose: print(f"{'='*80}") print("āœ… FINAL RESPONSE") print(f"{'='*80}\n") - - return { - "query": query, - "original_responses": original_responses, - "evaluations": evaluations, - "final_response": final_response, - "anonymous_mapping": id_to_member, - } + # Format and return output using history_output_formatter + return history_output_formatter( + conversation=self.conversation, type=self.output_type + ) diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py index 768256e1..1d6e8762 100644 --- a/tests/structs/test_auto_swarms_builder.py +++ b/tests/structs/test_auto_swarms_builder.py @@ -168,7 +168,9 @@ def test_error_handling(): # Test with invalid agent configuration print("Testing invalid agent configuration...") try: - swarm.create_agents_from_specs({"agents": [{"agent_name": ""}]}) + swarm.create_agents_from_specs( + {"agents": [{"agent_name": ""}]} + ) print( "āœ— Should have raised an error for empty agent configuration" ) diff --git a/tests/structs/test_i_agent.py b/tests/structs/test_i_agent.py index 3edf9a8e..1c1f95c5 100644 --- a/tests/structs/test_i_agent.py +++ b/tests/structs/test_i_agent.py @@ -1,5 +1,3 @@ -import pytest - from swarms.agents.i_agent import IterativeReflectiveExpansion diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index e4a48a20..99dd73ae 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -3,7 +3,6 @@ import pytest from swarms import Agent, SequentialWorkflow - def test_sequential_workflow_initialization_with_agents(): """Test SequentialWorkflow initialization with agents""" agent1 = Agent( From f703f2525b4d9c123a39df4b01f397fbe1d214d9 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 23 Nov 2025 23:28:54 -0800 Subject: [PATCH 15/16] Improved LLM Council docs --- docs/swarms/structs/llm_council.md | 61 
+++++++++++++++++------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md index 0f83b0d9..e1092bb4 100644 --- a/docs/swarms/structs/llm_council.md +++ b/docs/swarms/structs/llm_council.md @@ -86,10 +86,12 @@ Initializes the LLM Council with council members and a Chairman agent. Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of: -- **GPT-5.1-Councilor**: Analytical and comprehensive responses -- **Gemini-3-Pro-Councilor**: Concise and well-processed responses -- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses -- **Grok-4-Councilor**: Creative and innovative responses +| Council Member | Description | +|---------------------------------|------------------------------------------| +| **GPT-5.1-Councilor** | Analytical and comprehensive responses | +| **Gemini-3-Pro-Councilor** | Concise and well-processed responses | +| **Claude-Sonnet-4.5-Councilor** | Thoughtful and balanced responses | +| **Grok-4-Councilor** | Creative and innovative responses | The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis. 
@@ -137,17 +139,19 @@ Executes the full LLM Council workflow: parallel responses, anonymization, peer The return value depends on the `output_type` parameter set during initialization: -- **`"dict"`** (default): Returns conversation as a dictionary/list of message dictionaries -- **`"list"`**: Returns conversation as a list of formatted strings (`"role: content"`) -- **`"string"`** or **`"str"`**: Returns conversation as a formatted string -- **`"final"`** or **`"last"`**: Returns only the content of the final message (Chairman's response) -- **`"json"`**: Returns conversation as a JSON string -- **`"yaml"`**: Returns conversation as a YAML string -- **`"xml"`**: Returns conversation as an XML string -- **`"dict-all-except-first"`**: Returns all messages except the first as a dictionary -- **`"str-all-except-first"`**: Returns all messages except the first as a string -- **`"dict-final"`**: Returns the final message as a dictionary -- **`"list-final"`**: Returns the final message as a list +| `output_type` value | Description | +|---------------------------------|---------------------------------------------------------------------| +| **`"dict"`** (default) | Returns conversation as a dictionary/list of message dictionaries | +| **`"list"`** | Returns conversation as a list of formatted strings (`"role: content"`) | +| **`"string"`** or **`"str"`** | Returns conversation as a formatted string | +| **`"final"`** or **`"last"`** | Returns only the content of the final message (Chairman's response) | +| **`"json"`** | Returns conversation as a JSON string | +| **`"yaml"`** | Returns conversation as a YAML string | +| **`"xml"`** | Returns conversation as an XML string | +| **`"dict-all-except-first"`** | Returns all messages except the first as a dictionary | +| **`"str-all-except-first"`** | Returns all messages except the first as a string | +| **`"dict-final"`** | Returns the final message as a dictionary | +| **`"list-final"`** | Returns the final message as a 
list | #### Conversation Tracking @@ -354,11 +358,14 @@ The LLM Council is ideal for scenarios requiring: ### Common Applications -- **Medical Diagnosis**: Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations -- **Financial Analysis**: Different financial experts analyze investments and rank each other's assessments -- **Legal Analysis**: Multiple legal perspectives evaluate compliance and risk -- **Business Strategy**: Diverse strategic viewpoints are synthesized into comprehensive plans -- **Research Analysis**: Multiple research perspectives are combined for thorough analysis +| Use Case | Description | +|-----------------------|--------------------------------------------------------------------------------------------------| +| **Medical Diagnosis** | Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations | +| **Financial Analysis**| Different financial experts analyze investments and rank each other's assessments | +| **Legal Analysis** | Multiple legal perspectives evaluate compliance and risk | +| **Business Strategy** | Diverse strategic viewpoints are synthesized into comprehensive plans | +| **Research Analysis** | Multiple research perspectives are combined for thorough analysis | + ## Examples @@ -508,12 +515,14 @@ yaml_output = conversation.return_messages_as_dictionary() ## Performance Considerations -- **Parallel Execution**: Both response generation and evaluation phases run in parallel for efficiency -- **Anonymization**: Responses are anonymized to prevent bias in evaluation -- **Model Selection**: Different models can be used for different roles based on their strengths -- **Verbose Mode**: Can be disabled for production use to reduce output -- **Conversation Management**: Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files -- **Output Formatting**: Choose lightweight output formats (e.g., "final") for 
production to reduce memory usage +| Feature | Description | +|---------------------------|----------------------------------------------------------------------------------------------------------------| +| **Parallel Execution** | Both response generation and evaluation phases run in parallel for efficiency | +| **Anonymization** | Responses are anonymized to prevent bias in evaluation | +| **Model Selection** | Different models can be used for different roles based on their strengths | +| **Verbose Mode** | Can be disabled for production use to reduce output | +| **Conversation Management** | Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files | +| **Output Formatting** | Choose lightweight output formats (e.g., "final") for production to reduce memory usage | ## Related Documentation From f73b4f6b0711a7e90b96c414fd4fb7dd2a2cb619 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Mon, 24 Nov 2025 00:38:30 -0800 Subject: [PATCH 16/16] Integrate LLMCouncil into SwarmRouter --- docs/swarms/structs/swarm_router.md | 26 ++++++++++++++++++++++++++ swarms/structs/llm_council.py | 12 ++++++++++++ swarms/structs/swarm_router.py | 17 ++++++++++++++++- 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/docs/swarms/structs/swarm_router.md b/docs/swarms/structs/swarm_router.md index 8ccf1203..44bd1c8b 100644 --- a/docs/swarms/structs/swarm_router.md +++ b/docs/swarms/structs/swarm_router.md @@ -42,6 +42,7 @@ Main class for routing tasks to different swarm types. 
| `verbose` | bool | Flag to enable/disable verbose logging (default: False) | | `worker_tools` | List[Callable] | List of tools available to worker agents | | `aggregation_strategy` | str | Aggregation strategy for HeavySwarm (default: "synthesis") | +| `chairman_model` | str | Model name for the Chairman in LLMCouncil (default: "gpt-5.1") | ### Methods @@ -123,6 +124,7 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap | `InteractiveGroupChat` | Interactive group chat with user participation | | `HeavySwarm` | Heavy swarm architecture with question and worker agents | | `BatchedGridWorkflow` | Batched grid workflow for parallel task processing | +| `LLMCouncil` | Council of specialized LLM agents with peer review and synthesis | | `auto` | Automatically selects best swarm type via embedding search | ## Basic Usage @@ -456,6 +458,30 @@ result = batched_grid_router.run(tasks=["Task 1", "Task 2", "Task 3"]) BatchedGridWorkflow is designed for efficiently processing multiple tasks in parallel batches, optimizing resource utilization. +### LLMCouncil + +Use Case: Collaborative analysis with multiple specialized LLM agents that evaluate each other's responses and synthesize a final answer. + +```python +llm_council_router = SwarmRouter( + name="LLMCouncil", + description="Collaborative council of LLM agents with peer review", + swarm_type="LLMCouncil", + chairman_model="gpt-5.1", # Model for the Chairman agent + output_type="dict", # Output format: "dict", "list", "string", "json", "yaml", "final", etc. + verbose=True # Show progress and intermediate results +) + +result = llm_council_router.run("What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?") +``` + +LLMCouncil creates a council of specialized agents (GPT-5.1, Gemini, Claude, Grok by default) that: +1. Each independently responds to the query +2. Evaluates and ranks each other's anonymized responses +3. 
A Chairman synthesizes all responses and evaluations into a final comprehensive answer + +The council automatically tracks all messages in a conversation object and supports flexible output formats. Note: LLMCouncil uses default council members and doesn't require the `agents` parameter. + ## Advanced Features ### Processing Documents diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py index fa2a6ab5..a303fa12 100644 --- a/swarms/structs/llm_council.py +++ b/swarms/structs/llm_council.py @@ -502,3 +502,15 @@ class LLMCouncil: return history_output_formatter( conversation=self.conversation, type=self.output_type ) + + def batched_run(self, tasks: List[str]): + """ + Run the LLM Council workflow for a batch of tasks. + + Args: + tasks: List of tasks to process + + Returns: + List of formatted outputs based on output_type + """ + return [self.run(task) for task in tasks] \ No newline at end of file diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 92903f57..dd13ee08 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -37,6 +37,7 @@ from swarms.telemetry.log_executions import log_execution from swarms.utils.generate_keys import generate_api_key from swarms.utils.loguru_logger import initialize_logger from swarms.utils.output_types import OutputType +from swarms.structs.llm_council import LLMCouncil logger = initialize_logger(log_folder="swarm_router") @@ -56,6 +57,7 @@ SwarmType = Literal[ "InteractiveGroupChat", "HeavySwarm", "BatchedGridWorkflow", + "LLMCouncil", ] @@ -210,6 +212,7 @@ class SwarmRouter: verbose: bool = False, worker_tools: List[Callable] = None, aggregation_strategy: str = "synthesis", + chairman_model: str = "gpt-5.1", *args, **kwargs, ): @@ -252,7 +255,8 @@ class SwarmRouter: self.heavy_swarm_swarm_show_output = ( heavy_swarm_swarm_show_output ) - + self.chairman_model = chairman_model + # Initialize swarm factory for O(1) lookup performance self._swarm_factory 
= self._initialize_swarm_factory() self._swarm_cache = {} # Cache for created swarms @@ -425,6 +429,7 @@ class SwarmRouter: "SequentialWorkflow": self._create_sequential_workflow, "ConcurrentWorkflow": self._create_concurrent_workflow, "BatchedGridWorkflow": self._create_batched_grid_workflow, + "LLMCouncil": self._create_llm_council, } def _create_heavy_swarm(self, *args, **kwargs): @@ -441,6 +446,16 @@ class SwarmRouter: aggregation_strategy=self.aggregation_strategy, show_dashboard=False, ) + + def _create_llm_council(self, *args, **kwargs): + """Factory function for LLMCouncil.""" + return LLMCouncil( + name=self.name, + description=self.description, + output_type=self.output_type, + verbose=self.verbose, + chairman_model=self.chairman_model, + ) def _create_agent_rearrange(self, *args, **kwargs): """Factory function for AgentRearrange."""