fixed merge conflicts

pull/1242/head
Hugh 3 weeks ago
commit dda500e1f2

@@ -125,6 +125,7 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap
| `HeavySwarm` | Heavy swarm architecture with question and worker agents |
| `BatchedGridWorkflow` | Batched grid workflow for parallel task processing |
| `LLMCouncil` | Council of specialized LLM agents with peer review and synthesis |
| `DebateWithJudge` | Debate architecture with Pro/Con agents and a Judge for self-refinement |
| `auto` | Automatically selects best swarm type via embedding search |

## Basic Usage
@@ -482,6 +483,64 @@ LLMCouncil creates a council of specialized agents (GPT-5.1, Gemini, Claude, Gro
The council automatically tracks all messages in a conversation object and supports flexible output formats. Note: LLMCouncil uses default council members and doesn't require the `agents` parameter.
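For orientation, a minimal routing sketch is shown below. It assumes `SwarmRouter` accepts the same constructor parameters used in the `DebateWithJudge` example that follows; the name and task string are invented for illustration.

```python
from swarms import SwarmRouter

# Minimal sketch: LLMCouncil ships with default council members,
# so no `agents` list is passed to the router.
council_router = SwarmRouter(
    name="LLMCouncil-Router",  # hypothetical name for illustration
    description="Council of LLM agents with peer review and synthesis",
    swarm_type="LLMCouncil",
    output_type="dict-all-except-first",
    verbose=False,
)

result = council_router.run("What is one key trade-off in regulating AI development?")
```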
### DebateWithJudge
Use Case: Structured debate architecture where two agents (Pro and Con) present opposing arguments, and a Judge agent evaluates and synthesizes the arguments over multiple rounds to progressively refine the answer.
```python
from swarms import Agent, SwarmRouter

# Create three specialized agents for the debate
pro_agent = Agent(
    agent_name="Pro-Agent",
    system_prompt="You are an expert at presenting strong, well-reasoned arguments in favor of positions. "
    "You provide compelling evidence and logical reasoning to support your stance.",
    model_name="gpt-4.1",
    max_loops=1,
)

con_agent = Agent(
    agent_name="Con-Agent",
    system_prompt="You are an expert at presenting strong, well-reasoned counter-arguments. "
    "You identify weaknesses in opposing arguments and present compelling evidence against positions.",
    model_name="gpt-4.1",
    max_loops=1,
)

judge_agent = Agent(
    agent_name="Judge-Agent",
    system_prompt="You are an impartial judge evaluating debates. You carefully assess both arguments, "
    "identify strengths and weaknesses, and provide refined synthesis that incorporates "
    "the best elements from both sides.",
    model_name="gpt-4.1",
    max_loops=1,
)

# Initialize the SwarmRouter with DebateWithJudge
debate_router = SwarmRouter(
    name="DebateWithJudge",
    description="Structured debate with Pro/Con agents and Judge for self-refinement",
    swarm_type="DebateWithJudge",
    agents=[pro_agent, con_agent, judge_agent],  # Must be exactly 3 agents
    max_loops=3,  # Number of debate rounds
    output_type="str-all-except-first",  # Output format
    verbose=True,  # Show progress and intermediate results
)

# Run a debate on a topic
result = debate_router.run(
    "Should artificial intelligence development be regulated by governments?"
)
```
DebateWithJudge implements a multi-round debate system where:
1. **Pro Agent** presents arguments in favor of the topic
2. **Con Agent** presents counter-arguments against the topic
3. **Judge Agent** evaluates both arguments and provides synthesis
4. The process repeats for N rounds (specified by `max_loops`), with each round refining the discussion based on the judge's feedback
The architecture progressively improves the answer through iterative refinement, making it ideal for complex topics requiring thorough analysis from multiple perspectives. Note: DebateWithJudge requires exactly 3 agents (pro_agent, con_agent, judge_agent) in that order.
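To make the round structure concrete, the sketch below traces one possible control flow. It is a conceptual simplification, not the library's actual implementation (the real class lives in `swarms.structs.debate_with_judge`), and the prompt strings are invented for illustration:

```python
# Conceptual sketch of a DebateWithJudge-style refinement loop.
# Illustrative only: agents expose `run(task)` as in the example above,
# but the prompt plumbing here is invented.
def debate_rounds_sketch(pro_agent, con_agent, judge_agent, topic, max_rounds=3):
    synthesis = None
    context = topic
    for _ in range(max_rounds):
        # 1. Pro agent argues in favor of the current context
        pro_args = pro_agent.run(f"Argue in favor of: {context}")
        # 2. Con agent presents counter-arguments
        con_args = con_agent.run(f"Present counter-arguments to: {pro_args}")
        # 3. Judge evaluates both sides and synthesizes a refined answer
        synthesis = judge_agent.run(
            f"Topic: {topic}\nPro: {pro_args}\nCon: {con_args}\n"
            "Evaluate both arguments and synthesize a refined answer."
        )
        # 4. The next round builds on the judge's feedback
        context = f"{topic}\nPrevious synthesis: {synthesis}"
    return synthesis
```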
## Advanced Features

### Processing Documents

@@ -19,6 +19,8 @@ pytest
networkx
aiofiles
httpx
requests
litellm
# vllm>=0.2.0
aiohttp
mcp

@@ -23,9 +23,10 @@ from swarms.structs.agent_rearrange import AgentRearrange
from swarms.structs.batched_grid_workflow import BatchedGridWorkflow
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.council_as_judge import CouncilAsAJudge
from swarms.structs.debate_with_judge import DebateWithJudge
from swarms.structs.groupchat import GroupChat
from swarms.structs.heavy_swarm import HeavySwarm
-from swarms.structs.hiearchical_swarm import HierarchicalSwarm
+from swarms.structs.hierarchical_swarm import HierarchicalSwarm
from swarms.structs.interactive_groupchat import InteractiveGroupChat
from swarms.structs.ma_utils import list_all_agents
from swarms.structs.majority_voting import MajorityVoting
@@ -49,7 +50,7 @@ SwarmType = Literal[
    "GroupChat",
    "MultiAgentRouter",
    "AutoSwarmBuilder",
-    "HiearchicalSwarm",
+    "HierarchicalSwarm",
    "auto",
    "MajorityVoting",
    "MALT",
@@ -58,6 +59,7 @@ SwarmType = Literal[
    "HeavySwarm",
    "BatchedGridWorkflow",
    "LLMCouncil",
    "DebateWithJudge",
]
@@ -306,12 +308,23 @@ class SwarmRouter:
if (
    self.swarm_type != "HeavySwarm"
    and self.swarm_type != "DebateWithJudge"
    and self.agents is None
):
    raise SwarmRouterConfigError(
        "SwarmRouter: No agents provided for the swarm. Check the docs to learn of required parameters. https://docs.swarms.world/en/latest/swarms/structs/agent/"
    )

if self.swarm_type == "DebateWithJudge":
    if self.agents is None or len(self.agents) != 3:
        raise SwarmRouterConfigError(
            "SwarmRouter: DebateWithJudge requires exactly 3 agents: "
            "pro_agent (arguing in favor), con_agent (arguing against), "
            "and judge_agent (evaluating and synthesizing). "
            f"Provided {len(self.agents) if self.agents else 0} agent(s). "
            "Check the docs: https://docs.swarms.world/en/latest/swarms/structs/swarm_router/"
        )

if (
    self.swarm_type == "AgentRearrange"
    and self.rearrange_flow is None
@@ -421,7 +434,7 @@ class SwarmRouter:
    "MALT": self._create_malt,
    "CouncilAsAJudge": self._create_council_as_judge,
    "InteractiveGroupChat": self._create_interactive_group_chat,
-    "HiearchicalSwarm": self._create_hierarchical_swarm,
+    "HierarchicalSwarm": self._create_hierarchical_swarm,
    "MixtureOfAgents": self._create_mixture_of_agents,
    "MajorityVoting": self._create_majority_voting,
    "GroupChat": self._create_group_chat,
@@ -430,6 +443,7 @@ class SwarmRouter:
    "ConcurrentWorkflow": self._create_concurrent_workflow,
    "BatchedGridWorkflow": self._create_batched_grid_workflow,
    "LLMCouncil": self._create_llm_council,
    "DebateWithJudge": self._create_debate_with_judge,
}

def _create_heavy_swarm(self, *args, **kwargs):
@@ -457,6 +471,17 @@
    chairman_model=self.chairman_model,
)

def _create_debate_with_judge(self, *args, **kwargs):
    """Factory function for DebateWithJudge."""
    return DebateWithJudge(
        pro_agent=self.agents[0],
        con_agent=self.agents[1],
        judge_agent=self.agents[2],
        max_rounds=self.max_loops,
        output_type=self.output_type,
        verbose=self.verbose,
    )

def _create_agent_rearrange(self, *args, **kwargs):
    """Factory function for AgentRearrange."""
    return AgentRearrange(

@@ -0,0 +1,262 @@
"""
Test file for LLM Council functionality.
Tests core functionalities of the LLM Council including:
- Initialization (default and custom)
- Running queries
- Batch processing
- Output formatting
"""
import pytest
from loguru import logger
from dotenv import load_dotenv
from swarms.structs.llm_council import LLMCouncil
from swarms.structs.agent import Agent
load_dotenv()
def test_llm_council_default_initialization():
    """Test LLM Council initialization with default council members."""
    try:
        logger.info("Testing LLM Council default initialization...")
        council = LLMCouncil(
            verbose=False,
            output_type="dict-all-except-first",
        )
        assert council is not None, "Council should be initialized"
        assert council.name == "LLM Council", "Default name should be 'LLM Council'"
        assert len(council.council_members) > 0, "Should have council members"
        assert council.chairman is not None, "Chairman should be initialized"
        assert council.conversation is not None, "Conversation should be initialized"
        logger.info(f"✓ Council initialized with {len(council.council_members)} members")
        logger.info("✓ Default initialization test passed")
    except Exception as e:
        logger.error(f"✗ Default initialization test failed: {e}")
        raise

def test_llm_council_custom_initialization():
    """Test LLM Council initialization with custom council members."""
    try:
        logger.info("Testing LLM Council custom initialization...")
        # Create custom council members with simpler models
        custom_members = [
            Agent(
                agent_name="TestAgent1",
                agent_description="First test agent",
                system_prompt="You are a helpful test agent.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
            Agent(
                agent_name="TestAgent2",
                agent_description="Second test agent",
                system_prompt="You are a helpful test agent.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
        ]
        council = LLMCouncil(
            name="Custom Council",
            council_members=custom_members,
            chairman_model="gpt-4o-mini",
            verbose=False,
            output_type="string",
        )
        assert council is not None, "Council should be initialized"
        assert council.name == "Custom Council", "Name should match custom value"
        assert len(council.council_members) == 2, "Should have 2 custom members"
        assert council.council_members[0].agent_name == "TestAgent1", "First member should match"
        assert council.council_members[1].agent_name == "TestAgent2", "Second member should match"
        assert council.output_type == "string", "Output type should be 'string'"
        logger.info("✓ Custom initialization test passed")
    except Exception as e:
        logger.error(f"✗ Custom initialization test failed: {e}")
        raise

def test_llm_council_run():
    """Test LLM Council run method with a simple query."""
    try:
        logger.info("Testing LLM Council run method...")
        # Use simpler models for testing
        custom_members = [
            Agent(
                agent_name="TestAgent1",
                agent_description="First test agent",
                system_prompt="You are a helpful test agent. Provide concise answers.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
            Agent(
                agent_name="TestAgent2",
                agent_description="Second test agent",
                system_prompt="You are a helpful test agent. Provide concise answers.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
        ]
        council = LLMCouncil(
            council_members=custom_members,
            chairman_model="gpt-4o-mini",
            verbose=False,
            output_type="dict-all-except-first",
        )
        query = "What is 2 + 2? Provide a brief answer."
        result = council.run(query)
        # Basic assertions
        assert result is not None, "Result should not be None"
        assert council.conversation is not None, "Conversation should exist"
        assert len(council.conversation.conversation_history) > 0, "Conversation should have messages"
        # Enhanced assertions to verify workflow steps
        messages = council.conversation.conversation_history
        # Step 1: Verify User query was added
        user_messages = [msg for msg in messages if msg.get("role") == "User"]
        assert len(user_messages) > 0, "User query should be in conversation"
        # Step 2: Verify all council members responded
        member_responses = [msg for msg in messages if msg.get("role") in ["TestAgent1", "TestAgent2"]]
        assert len(member_responses) == len(custom_members), f"All {len(custom_members)} council members should have responded"
        # Step 3: Verify evaluations were performed
        evaluation_messages = [msg for msg in messages if "-Evaluation" in msg.get("role", "")]
        assert len(evaluation_messages) == len(custom_members), f"All {len(custom_members)} members should have evaluated"
        # Step 4: Verify Chairman synthesis occurred
        chairman_messages = [msg for msg in messages if msg.get("role") == "Chairman"]
        assert len(chairman_messages) > 0, "Chairman should have synthesized final response"
        logger.info("✓ Run method test passed")
        logger.info(f"✓ Verified {len(member_responses)} member responses, {len(evaluation_messages)} evaluations, and {len(chairman_messages)} chairman synthesis")
    except Exception as e:
        logger.error(f"✗ Run method test failed: {e}")
        raise

def test_llm_council_batched_run():
    """Test LLM Council batched_run method with multiple tasks."""
    try:
        logger.info("Testing LLM Council batched_run method...")
        # Use simpler models for testing
        custom_members = [
            Agent(
                agent_name="TestAgent1",
                agent_description="First test agent",
                system_prompt="You are a helpful test agent. Provide concise answers.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
            Agent(
                agent_name="TestAgent2",
                agent_description="Second test agent",
                system_prompt="You are a helpful test agent. Provide concise answers.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
        ]
        council = LLMCouncil(
            council_members=custom_members,
            chairman_model="gpt-4o-mini",
            verbose=False,
            output_type="dict-all-except-first",
        )
        tasks = [
            "What is 1 + 1?",
            "What is 3 + 3?",
        ]
        results = council.batched_run(tasks)
        assert results is not None, "Results should not be None"
        assert len(results) == len(tasks), f"Should have {len(tasks)} results"
        assert all(result is not None for result in results), "All results should not be None"
        logger.info(f"✓ Batched run test passed with {len(results)} results")
    except Exception as e:
        logger.error(f"✗ Batched run test failed: {e}")
        raise

def test_llm_council_output_types():
    """Test LLM Council with different output types."""
    try:
        logger.info("Testing LLM Council with different output types...")
        # Use simpler models for testing
        custom_members = [
            Agent(
                agent_name="TestAgent1",
                agent_description="First test agent",
                system_prompt="You are a helpful test agent. Provide concise answers.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
            Agent(
                agent_name="TestAgent2",
                agent_description="Second test agent",
                system_prompt="You are a helpful test agent. Provide concise answers.",
                model_name="gpt-4o-mini",
                max_loops=1,
                verbose=False,
            ),
        ]
        output_types = ["string", "dict-all-except-first", "final"]
        for output_type in output_types:
            logger.info(f"Testing output type: {output_type}")
            council = LLMCouncil(
                council_members=custom_members,
                chairman_model="gpt-4o-mini",
                verbose=False,
                output_type=output_type,
            )
            query = "What is 5 + 5? Provide a brief answer."
            result = council.run(query)
            assert result is not None, f"Result should not be None for output type {output_type}"
            assert council.output_type == output_type, f"Output type should be {output_type}"
            logger.info(f"✓ Output type '{output_type}' test passed")
        logger.info("✓ All output types test passed")
    except Exception as e:
        logger.error(f"✗ Output types test failed: {e}")
        raise

if __name__ == "__main__":
    pytest.main([__file__, "-v"])