diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md
index 6352bcef..0f83b0d9 100644
--- a/docs/swarms/structs/llm_council.md
+++ b/docs/swarms/structs/llm_council.md
@@ -2,61 +2,35 @@
```mermaid
flowchart TD
- A[User Query] --> B[LLM Council Initialization]
- B --> C{Council Members Provided?}
- C -->|No| D[Create Default Council]
- C -->|Yes| E[Use Provided Members]
- D --> F[Step 1: Parallel Response Generation]
- E --> F
+ A[User Query] --> B[Council Members]
- subgraph "Default Council Members"
- G1[GPT-5.1-Councilor
Analytical & Comprehensive]
- G2[Gemini-3-Pro-Councilor
Concise & Structured]
- G3[Claude-Sonnet-4.5-Councilor
Thoughtful & Balanced]
- G4[Grok-4-Councilor
Creative & Innovative]
+ subgraph "Council Members"
+ C1[GPT-5.1-Councilor]
+ C2[Gemini-3-Pro-Councilor]
+ C3[Claude-Sonnet-4.5-Councilor]
+ C4[Grok-4-Councilor]
end
- F --> G1
- F --> G2
- F --> G3
- F --> G4
+ B --> C1
+ B --> C2
+ B --> C3
+ B --> C4
- G1 --> H[Collect All Responses]
- G2 --> H
- G3 --> H
- G4 --> H
+ C1 --> D[Responses]
+ C2 --> D
+ C3 --> D
+ C4 --> D
- H --> I[Step 2: Anonymize Responses]
- I --> J[Assign Anonymous IDs: A, B, C, D...]
-
- J --> K[Step 3: Parallel Evaluation]
-
- subgraph "Evaluation Phase"
- K --> L1[Member 1 Evaluates All]
- K --> L2[Member 2 Evaluates All]
- K --> L3[Member 3 Evaluates All]
- K --> L4[Member 4 Evaluates All]
- end
-
- L1 --> M[Collect Evaluations & Rankings]
- L2 --> M
- L3 --> M
- L4 --> M
-
- M --> N[Step 4: Chairman Synthesis]
- N --> O[Chairman Agent]
- O --> P[Final Synthesized Response]
-
- P --> Q[Return Results Dictionary]
-
- style A fill:#e1f5ff
- style P fill:#c8e6c9
- style Q fill:#c8e6c9
- style O fill:#fff9c4
+ D --> E[Anonymize & Evaluate]
+ E --> F[Chairman Synthesis]
+ F --> G[Final Response]
+
```
The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own.
+The class automatically tracks all agent messages in a `Conversation` object and formats output using `history_output_formatter`, providing flexible output formats including dictionaries, lists, strings, JSON, YAML, and more.
+
## Workflow Overview
The LLM Council follows a four-step process:
@@ -80,6 +54,8 @@ class LLMCouncil:
|-----------|------|-------------|---------|
| `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) |
| `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization |
+| `conversation` | `Conversation` | Conversation object tracking all messages throughout the workflow | Created during initialization |
+| `output_type` | `HistoryOutputType` | Format for the output (e.g., "dict", "list", "string", "json", "yaml") | `"dict"` |
| `verbose` | `bool` | Whether to print progress and intermediate results | `True` |
## Methods
@@ -92,9 +68,13 @@ Initializes the LLM Council with council members and a Chairman agent.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
+| `id` | `str` | `swarm_id()` | Unique identifier for the council instance. |
+| `name` | `str` | `"LLM Council"` | Name of the council instance. |
+| `description` | `str` | `"A collaborative council..."` | Description of the council's purpose. |
| `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. |
| `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. |
| `verbose` | `bool` | `True` | Whether to print progress and intermediate results. |
+| `output_type` | `HistoryOutputType` | `"dict"` | Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", "xml", "dict-all-except-first", "str-all-except-first", "dict-final", "list-final". |
#### Returns
@@ -105,12 +85,13 @@ Initializes the LLM Council with council members and a Chairman agent.
#### Description
Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of:
+
- **GPT-5.1-Councilor**: Analytical and comprehensive responses
- **Gemini-3-Pro-Councilor**: Concise and well-processed responses
- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced responses
- **Grok-4-Councilor**: Creative and innovative responses
-The Chairman agent is automatically created with a specialized prompt for synthesizing responses.
+The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis.
#### Example Usage
@@ -120,7 +101,7 @@ from swarms.structs.llm_council import LLMCouncil
# Create council with default members
council = LLMCouncil(verbose=True)
-# Create council with custom members
+# Create council with custom members and output format
from swarms import Agent
custom_members = [
Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1),
@@ -129,7 +110,8 @@ custom_members = [
council = LLMCouncil(
council_members=custom_members,
chairman_model="gpt-4",
- verbose=True
+ verbose=True,
+ output_type="json" # Output as JSON string
)
```
@@ -137,7 +119,7 @@ council = LLMCouncil(
### `run`
-Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis.
+Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. All messages are tracked in the conversation object and formatted according to the `output_type` setting.
#### Parameters
@@ -149,54 +131,79 @@ Executes the full LLM Council workflow: parallel responses, anonymization, peer
| Type | Description |
|------|-------------|
-| `Dict` | Dictionary containing the following keys: |
+| `Union[List, Dict, str]` | Formatted output based on `output_type`. The output contains the conversation history with all messages tracked throughout the workflow. |
-#### Return Dictionary Structure
+#### Output Format
-| Key | Type | Description |
-|-----|------|-------------|
-| `query` | `str` | The original user query. |
-| `original_responses` | `Dict[str, str]` | Dictionary mapping council member names to their original responses. |
-| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts (rankings and reasoning). |
-| `final_response` | `str` | The Chairman's synthesized final answer combining all perspectives. |
-| `anonymous_mapping` | `Dict[str, str]` | Mapping from anonymous IDs (A, B, C, D) to member names for reference. |
+The return value depends on the `output_type` parameter set during initialization:
+
+- **`"dict"`** (default): Returns conversation as a dictionary/list of message dictionaries
+- **`"list"`**: Returns conversation as a list of formatted strings (`"role: content"`)
+- **`"string"`** or **`"str"`**: Returns conversation as a formatted string
+- **`"final"`** or **`"last"`**: Returns only the content of the final message (Chairman's response)
+- **`"json"`**: Returns conversation as a JSON string
+- **`"yaml"`**: Returns conversation as a YAML string
+- **`"xml"`**: Returns conversation as an XML string
+- **`"dict-all-except-first"`**: Returns all messages except the first as a dictionary
+- **`"str-all-except-first"`**: Returns all messages except the first as a string
+- **`"dict-final"`**: Returns the final message as a dictionary
+- **`"list-final"`**: Returns the final message as a list
+
+#### Conversation Tracking
+
+All messages are automatically tracked in the conversation object with the following roles:
+
+- **`"User"`**: The original user query
+- **`"{member_name}"`**: Each council member's response (e.g., "GPT-5.1-Councilor")
+- **`"{member_name}-Evaluation"`**: Each council member's evaluation (e.g., "GPT-5.1-Councilor-Evaluation")
+- **`"Chairman"`**: The final synthesized response
#### Description
Executes the complete LLM Council workflow:
-1. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently`
-2. **Collection Phase**: Collects all responses and maps them to member names
-3. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity
-4. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution`
-5. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer
+1. **User Query Tracking**: Adds the user query to the conversation as "User" role
+2. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently`
+3. **Collection Phase**: Collects all responses, maps them to member names, and adds each to the conversation with the member's name as the role
+4. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity
+5. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution`, then adds evaluations to the conversation with "{member_name}-Evaluation" as the role
+6. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer, which is added to the conversation as "Chairman" role
+7. **Output Formatting**: Returns the conversation formatted according to the `output_type` setting using `history_output_formatter`
-The method provides verbose output by default, showing progress at each stage.
+The method provides verbose output by default, showing progress at each stage. All messages are tracked in the `conversation` attribute for later access or export.
#### Example Usage
```python
from swarms.structs.llm_council import LLMCouncil
+# Create council with default output format (dict)
council = LLMCouncil(verbose=True)
query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?"
+# Run the council - returns formatted conversation based on output_type
result = council.run(query)
-# Access the final synthesized response
-print(result["final_response"])
+# With default "dict" output_type, result is a list of message dictionaries
+# Access conversation messages
+for message in result:
+ print(f"{message['role']}: {message['content'][:200]}...")
+
+# Access the conversation object directly for more control
+conversation = council.conversation
+print("\nFinal message:", conversation.get_final_message_content())
-# Access individual member responses
-for name, response in result["original_responses"].items():
- print(f"{name}: {response[:200]}...")
+# Get conversation as string
+print("\nFull conversation:")
+print(conversation.get_str())
-# Access evaluation rankings
-for evaluator, evaluation in result["evaluations"].items():
- print(f"{evaluator} evaluation:\n{evaluation[:300]}...")
+# Example with different output types
+council_json = LLMCouncil(output_type="json", verbose=False)
+result_json = council_json.run(query) # Returns JSON string
-# Check anonymous mapping
-print("Anonymous IDs:", result["anonymous_mapping"])
+council_final = LLMCouncil(output_type="final", verbose=False)
+result_final = council_final.run(query) # Returns only final response string
```
---
@@ -225,6 +232,7 @@ Internal method that creates the default council configuration with four special
- **Grok-4-Councilor** (`model_name="x-ai/grok-4"`): Creative and innovative, temperature=0.8
Each agent is configured with:
+
- Specialized system prompts matching their role
- `max_loops=1` for single-response generation
- `verbose=False` to reduce noise during parallel execution
@@ -367,25 +375,40 @@ For comprehensive examples demonstrating various use cases, see the [LLM Council
```python
from swarms.structs.llm_council import LLMCouncil
-# Create the council
+# Create the council with default output format
council = LLMCouncil(verbose=True)
# Example query
query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?"
-# Run the council
+# Run the council - returns formatted conversation
result = council.run(query)
-# Print final response
-print(result["final_response"])
+# With default "dict" output_type, result is a list of message dictionaries
+# Print all messages
+for message in result:
+ role = message['role']
+ content = message['content']
+ print(f"\n{role}:")
+ print(content[:500] + "..." if len(content) > 500 else content)
-# Optionally print evaluations
-print("\n\n" + "="*80)
-print("EVALUATIONS")
+# Access conversation object directly for more options
+conversation = council.conversation
+
+# Get only the final response
+print("\n" + "="*80)
+print("FINAL RESPONSE")
print("="*80)
-for name, evaluation in result["evaluations"].items():
- print(f"\n{name}:")
- print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation)
+print(conversation.get_final_message_content())
+
+# Get conversation as formatted string
+print("\n" + "="*80)
+print("FULL CONVERSATION")
+print("="*80)
+print(conversation.get_str())
+
+# Export conversation to JSON
+conversation.export()
```
## Customization
@@ -428,6 +451,50 @@ council = LLMCouncil(
)
```
+### Custom Output Format
+
+You can control the output format using the `output_type` parameter:
+
+```python
+# Get output as JSON string
+council = LLMCouncil(output_type="json")
+result = council.run(query) # Returns JSON string
+
+# Get only the final response
+council = LLMCouncil(output_type="final")
+result = council.run(query) # Returns only final response string
+
+# Get as YAML
+council = LLMCouncil(output_type="yaml")
+result = council.run(query) # Returns YAML string
+
+# Get as formatted string
+council = LLMCouncil(output_type="string")
+result = council.run(query) # Returns formatted conversation string
+```
+
+### Accessing Conversation History
+
+The conversation object is accessible for advanced usage:
+
+```python
+council = LLMCouncil()
+council.run(query)
+
+# Access conversation directly
+conversation = council.conversation
+
+# Get conversation history
+history = conversation.conversation_history
+
+# Export to file
+conversation.export() # Saves to default location
+
+# Get specific format
+json_output = conversation.to_json()
+dict_output = conversation.return_messages_as_dictionary()
+```
+
## Architecture Benefits
1. **Diversity**: Multiple models provide varied perspectives and approaches
@@ -436,6 +503,8 @@ council = LLMCouncil(
4. **Transparency**: Full visibility into individual responses and evaluation rankings
5. **Scalability**: Easy to add or remove council members
6. **Flexibility**: Supports custom agents and models
+7. **Conversation Tracking**: All messages are automatically tracked in a Conversation object for history and export
+8. **Flexible Output**: Multiple output formats supported via `history_output_formatter` (dict, list, string, JSON, YAML, XML, etc.)
## Performance Considerations
@@ -443,11 +512,14 @@ council = LLMCouncil(
- **Anonymization**: Responses are anonymized to prevent bias in evaluation
- **Model Selection**: Different models can be used for different roles based on their strengths
- **Verbose Mode**: Can be disabled for production use to reduce output
+- **Conversation Management**: Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files
+- **Output Formatting**: Choose lightweight output formats (e.g., "final") for production to reduce memory usage
## Related Documentation
- [Multi-Agent Architectures Overview](overview.md)
- [Council of Judges](council_of_judges.md) - Similar peer review pattern
- [Agent Class Reference](agent.md) - Understanding individual agents
+- [Conversation Class Reference](conversation.md) - Understanding conversation tracking and management
- [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods
-
+- History Output Formatter (`swarms/utils/history_output_formatter.py` in the source tree) - Output formatting utilities
diff --git a/examples/multi_agent/llm_council_examples/business_strategy_council.py b/examples/multi_agent/llm_council_examples/business_strategy_council.py
index bacc8995..10b5087b 100644
--- a/examples/multi_agent/llm_council_examples/business_strategy_council.py
+++ b/examples/multi_agent/llm_council_examples/business_strategy_council.py
@@ -29,4 +29,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py
index b69ffb70..7e85d851 100644
--- a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py
+++ b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py
@@ -27,4 +27,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/finance_analysis_council.py b/examples/multi_agent/llm_council_examples/finance_analysis_council.py
index d1f4c9a5..f014be47 100644
--- a/examples/multi_agent/llm_council_examples/finance_analysis_council.py
+++ b/examples/multi_agent/llm_council_examples/finance_analysis_council.py
@@ -27,4 +27,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/legal_analysis_council.py b/examples/multi_agent/llm_council_examples/legal_analysis_council.py
index 01bdcdc8..5ea3481e 100644
--- a/examples/multi_agent/llm_council_examples/legal_analysis_council.py
+++ b/examples/multi_agent/llm_council_examples/legal_analysis_council.py
@@ -29,4 +29,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py
index b033d982..a799c364 100644
--- a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py
+++ b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py
@@ -26,4 +26,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py
index f143945c..90532f38 100644
--- a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py
+++ b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py
@@ -34,4 +34,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/medical_treatment_council.py b/examples/multi_agent/llm_council_examples/medical_treatment_council.py
index cd828f1d..6084db4c 100644
--- a/examples/multi_agent/llm_council_examples/medical_treatment_council.py
+++ b/examples/multi_agent/llm_council_examples/medical_treatment_council.py
@@ -28,4 +28,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/research_analysis_council.py b/examples/multi_agent/llm_council_examples/research_analysis_council.py
index e276c96b..74a8585a 100644
--- a/examples/multi_agent/llm_council_examples/research_analysis_council.py
+++ b/examples/multi_agent/llm_council_examples/research_analysis_council.py
@@ -29,4 +29,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/examples/multi_agent/llm_council_examples/technology_assessment_council.py b/examples/multi_agent/llm_council_examples/technology_assessment_council.py
index 72c227a6..4db4dd95 100644
--- a/examples/multi_agent/llm_council_examples/technology_assessment_council.py
+++ b/examples/multi_agent/llm_council_examples/technology_assessment_council.py
@@ -29,4 +29,3 @@ result = council.run(query)
# Print final response
print(result["final_response"])
-
diff --git a/llm_council_example.py b/llm_council_example.py
index 26f4bfec..1cc415d0 100644
--- a/llm_council_example.py
+++ b/llm_council_example.py
@@ -15,5 +15,8 @@ print(result["final_response"])
# Optionally print evaluations
for name, evaluation in result["evaluations"].items():
print(f"\n{name}:")
- print(evaluation[:500] + "..." if len(evaluation) > 500 else evaluation)
-
+ print(
+ evaluation[:500] + "..."
+ if len(evaluation) > 500
+ else evaluation
+ )
diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py
index 749002db..e64ab828 100644
--- a/swarms/agents/reasoning_agents.py
+++ b/swarms/agents/reasoning_agents.py
@@ -88,9 +88,7 @@ class ReasoningAgentRouter:
eval: bool = False,
random_models_on: bool = False,
majority_voting_prompt: Optional[str] = None,
- reasoning_model_name: Optional[
- str
- ] = "gpt-4o",
+ reasoning_model_name: Optional[str] = "gpt-4o",
):
"""
Initialize the ReasoningAgentRouter with the specified configuration.
diff --git a/swarms/agents/reasoning_duo.py b/swarms/agents/reasoning_duo.py
index 81fa0310..581a69e7 100644
--- a/swarms/agents/reasoning_duo.py
+++ b/swarms/agents/reasoning_duo.py
@@ -35,9 +35,7 @@ class ReasoningDuo:
model_names: list[str] = ["gpt-4o-mini", "gpt-4.1"],
system_prompt: str = "You are a helpful assistant that can answer questions and help with tasks.",
output_type: OutputType = "dict-all-except-first",
- reasoning_model_name: Optional[
- str
- ] = "gpt-4o",
+ reasoning_model_name: Optional[str] = "gpt-4o",
max_loops: int = 1,
*args,
**kwargs,
diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py
index 1bc3dc52..141dfe62 100644
--- a/swarms/structs/aop.py
+++ b/swarms/structs/aop.py
@@ -679,7 +679,7 @@ class AOP:
self.tool_configs: Dict[str, AgentToolConfig] = {}
self.task_queues: Dict[str, TaskQueue] = {}
self.transport = transport
-
+
self.mcp_server = FastMCP(
name=server_name,
port=port,
diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py
index b422136b..fa2a6ab5 100644
--- a/swarms/structs/llm_council.py
+++ b/swarms/structs/llm_council.py
@@ -17,12 +17,14 @@ from swarms.structs.multi_agent_exec import (
run_agents_concurrently,
batched_grid_agent_execution,
)
-
+from swarms.utils.history_output_formatter import HistoryOutputType, history_output_formatter
+from swarms.structs.conversation import Conversation
+from swarms.structs.swarm_id import swarm_id
def get_gpt_councilor_prompt() -> str:
"""
Get system prompt for GPT-5.1 councilor.
-
+
Returns:
System prompt string for GPT-5.1 councilor agent.
"""
@@ -46,7 +48,7 @@ Remember: You are part of a council where multiple AI models will respond to the
def get_gemini_councilor_prompt() -> str:
"""
Get system prompt for Gemini 3 Pro councilor.
-
+
Returns:
System prompt string for Gemini 3 Pro councilor agent.
"""
@@ -70,7 +72,7 @@ Remember: You are part of a council where multiple AI models will respond to the
def get_claude_councilor_prompt() -> str:
"""
Get system prompt for Claude Sonnet 4.5 councilor.
-
+
Returns:
System prompt string for Claude Sonnet 4.5 councilor agent.
"""
@@ -94,7 +96,7 @@ Remember: You are part of a council where multiple AI models will respond to the
def get_grok_councilor_prompt() -> str:
"""
Get system prompt for Grok-4 councilor.
-
+
Returns:
System prompt string for Grok-4 councilor agent.
"""
@@ -118,7 +120,7 @@ Remember: You are part of a council where multiple AI models will respond to the
def get_chairman_prompt() -> str:
"""
Get system prompt for the Chairman agent.
-
+
Returns:
System prompt string for the Chairman agent.
"""
@@ -141,23 +143,27 @@ Your approach:
Remember: You have access to all original responses and all evaluations. Use this rich context to create the best possible final answer."""
-def get_evaluation_prompt(query: str, responses: Dict[str, str], evaluator_name: str) -> str:
+def get_evaluation_prompt(
+ query: str, responses: Dict[str, str], evaluator_name: str
+) -> str:
"""
Create evaluation prompt for council members to review and rank responses.
-
+
Args:
query: The original user query
responses: Dictionary mapping anonymous IDs to response texts
evaluator_name: Name of the agent doing the evaluation
-
+
Returns:
Formatted evaluation prompt string
"""
- responses_text = "\n\n".join([
- f"Response {response_id}:\n{response_text}"
- for response_id, response_text in responses.items()
- ])
-
+ responses_text = "\n\n".join(
+ [
+ f"Response {response_id}:\n{response_text}"
+ for response_id, response_text in responses.items()
+ ]
+ )
+
return f"""You are evaluating responses from your fellow LLM Council members to the following query:
QUERY: {query}
@@ -191,30 +197,34 @@ def get_synthesis_prompt(
query: str,
original_responses: Dict[str, str],
evaluations: Dict[str, str],
- id_to_member: Dict[str, str]
+ id_to_member: Dict[str, str],
) -> str:
"""
Create synthesis prompt for the Chairman.
-
+
Args:
query: Original user query
original_responses: Dict mapping member names to their responses
evaluations: Dict mapping evaluator names to their evaluation texts
id_to_member: Mapping from anonymous IDs to member names
-
+
Returns:
Formatted synthesis prompt
"""
- responses_section = "\n\n".join([
- f"=== {name} ===\n{response}"
- for name, response in original_responses.items()
- ])
-
- evaluations_section = "\n\n".join([
- f"=== Evaluation by {name} ===\n{evaluation}"
- for name, evaluation in evaluations.items()
- ])
-
+ responses_section = "\n\n".join(
+ [
+ f"=== {name} ===\n{response}"
+ for name, response in original_responses.items()
+ ]
+ )
+
+ evaluations_section = "\n\n".join(
+ [
+ f"=== Evaluation by {name} ===\n{evaluation}"
+ for name, evaluation in evaluations.items()
+ ]
+ )
+
return f"""As the Chairman of the LLM Council, synthesize the following information into a final, comprehensive answer.
ORIGINAL QUERY:
@@ -246,38 +256,46 @@ class LLMCouncil:
"""
An LLM Council that orchestrates multiple specialized agents to collaboratively
answer queries through independent responses, peer review, and synthesis.
-
+
The council follows this workflow:
1. Dispatch query to all council members in parallel
2. Collect all responses (anonymized)
3. Have each member review and rank all responses
4. Chairman synthesizes everything into final response
"""
-
+
def __init__(
self,
+        id: str = swarm_id(),  # NOTE(review): default is evaluated once at import, so all instances share one id — prefer `id: Optional[str] = None` and call swarm_id() in __init__; `id` is also never assigned to self
+ name: str = "LLM Council",
+ description: str = "A collaborative council of LLM agents where each member independently answers a query, reviews and ranks anonymized peer responses, and a chairman synthesizes the best elements into a final answer.",
council_members: Optional[List[Agent]] = None,
chairman_model: str = "gpt-5.1",
verbose: bool = True,
+ output_type: HistoryOutputType = "dict",
):
"""
Initialize the LLM Council.
-
+
Args:
council_members: List of Agent instances representing council members.
If None, creates default council with GPT-5.1, Gemini 3 Pro,
Claude Sonnet 4.5, and Grok-4.
chairman_model: Model name for the Chairman agent that synthesizes responses.
verbose: Whether to print progress and intermediate results.
+ output_type: Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", etc.
"""
+ self.name = name
+ self.description = description
self.verbose = verbose
-
+ self.output_type = output_type
+
# Create default council members if none provided
if council_members is None:
self.council_members = self._create_default_council()
else:
self.council_members = council_members
-
+
# Create Chairman agent
self.chairman = Agent(
agent_name="Chairman",
@@ -289,19 +307,25 @@ class LLMCouncil:
temperature=0.7,
)
+ self.conversation = Conversation(name=f"[LLM Council] [Conversation][{name}]")
+
if self.verbose:
- print(f"šļø LLM Council initialized with {len(self.council_members)} members")
+ print(
+ f"šļø LLM Council initialized with {len(self.council_members)} members"
+ )
for i, member in enumerate(self.council_members, 1):
- print(f" {i}. {member.agent_name} ({member.model_name})")
-
+ print(
+ f" {i}. {member.agent_name} ({member.model_name})"
+ )
+
def _create_default_council(self) -> List[Agent]:
"""
Create default council members with specialized prompts and models.
-
+
Returns:
List of Agent instances configured as council members.
"""
-
+
# GPT-5.1 Agent - Analytical and comprehensive
gpt_agent = Agent(
agent_name="GPT-5.1-Councilor",
@@ -312,7 +336,7 @@ class LLMCouncil:
verbose=False,
temperature=0.7,
)
-
+
# Gemini 3 Pro Agent - Concise and processed
gemini_agent = Agent(
agent_name="Gemini-3-Pro-Councilor",
@@ -323,7 +347,7 @@ class LLMCouncil:
verbose=False,
temperature=0.7,
)
-
+
# Claude Sonnet 4.5 Agent - Balanced and thoughtful
claude_agent = Agent(
agent_name="Claude-Sonnet-4.5-Councilor",
@@ -335,7 +359,7 @@ class LLMCouncil:
temperature=0.0,
top_p=None,
)
-
+
# Grok-4 Agent - Creative and innovative
grok_agent = Agent(
agent_name="Grok-4-Councilor",
@@ -346,114 +370,135 @@ class LLMCouncil:
verbose=False,
temperature=0.8,
)
-
+
members = [gpt_agent, gemini_agent, claude_agent, grok_agent]
-
+
return members
-
- def run(self, query: str) -> Dict:
+
+ def run(self, query: str):
"""
Execute the full LLM Council workflow.
-
+
Args:
query: The user's query to process
-
+
Returns:
- Dictionary containing:
- - original_responses: Dict mapping member names to their responses
- - evaluations: Dict mapping evaluator names to their rankings
- - final_response: The Chairman's synthesized final answer
+ Formatted output based on output_type, containing conversation history
+ with all council member responses, evaluations, and final synthesis.
"""
if self.verbose:
print(f"\n{'='*80}")
print("šļø LLM COUNCIL SESSION")
- print("="*80)
+ print("=" * 80)
print(f"\nš Query: {query}\n")
-
+
+ # Add user query to conversation
+ self.conversation.add(role="User", content=query)
+
# Step 1: Get responses from all council members in parallel
if self.verbose:
print("š¤ Dispatching query to all council members...")
-
+
results_dict = run_agents_concurrently(
self.council_members,
task=query,
- return_agent_output_dict=True
+ return_agent_output_dict=True,
)
-
+
# Map results to member names
original_responses = {
member.agent_name: response
- for member, response in zip(self.council_members,
- [results_dict.get(member.agent_name, "")
- for member in self.council_members])
+ for member, response in zip(
+ self.council_members,
+ [
+ results_dict.get(member.agent_name, "")
+ for member in self.council_members
+ ],
+ )
}
-
+
+ # Add each council member's response to conversation
+ for member_name, response in original_responses.items():
+ self.conversation.add(role=member_name, content=response)
+
if self.verbose:
-            print(f"✅ Received {len(original_responses)} responses\n")
+ print(
+                f"✅ Received {len(original_responses)} responses\n"
+ )
for name, response in original_responses.items():
print(f" {name}: {response[:100]}...")
-
+
# Step 2: Anonymize responses for evaluation
# Create anonymous IDs (A, B, C, D, etc.)
- anonymous_ids = [chr(65 + i) for i in range(len(self.council_members))]
+ anonymous_ids = [
+ chr(65 + i) for i in range(len(self.council_members))
+ ]
random.shuffle(anonymous_ids) # Shuffle to ensure anonymity
-
+
anonymous_responses = {
anonymous_ids[i]: original_responses[member.agent_name]
for i, member in enumerate(self.council_members)
}
-
+
# Create mapping from anonymous ID to member name (for later reference)
id_to_member = {
anonymous_ids[i]: member.agent_name
for i, member in enumerate(self.council_members)
}
-
+
if self.verbose:
- print("\nš Council members evaluating each other's responses...")
-
+ print(
+ "\nš Council members evaluating each other's responses..."
+ )
+
# Step 3: Have each member evaluate and rank all responses concurrently
# Create evaluation tasks for each member
evaluation_tasks = [
- get_evaluation_prompt(query, anonymous_responses, member.agent_name)
+ get_evaluation_prompt(
+ query, anonymous_responses, member.agent_name
+ )
for member in self.council_members
]
-
+
# Run evaluations concurrently using batched_grid_agent_execution
evaluation_results = batched_grid_agent_execution(
- self.council_members,
- evaluation_tasks
+ self.council_members, evaluation_tasks
)
-
+
# Map results to member names
evaluations = {
member.agent_name: evaluation_results[i]
for i, member in enumerate(self.council_members)
}
-
+
+ # Add each council member's evaluation to conversation
+ for member_name, evaluation in evaluations.items():
+ self.conversation.add(
+ role=f"{member_name}-Evaluation", content=evaluation
+ )
+
if self.verbose:
             print(f"✅ Received {len(evaluations)} evaluations\n")
-
+
# Step 4: Chairman synthesizes everything
if self.verbose:
print("š Chairman synthesizing final response...\n")
-
+
synthesis_prompt = get_synthesis_prompt(
query, original_responses, evaluations, id_to_member
)
-
+
final_response = self.chairman.run(task=synthesis_prompt)
-
+
+ # Add chairman's final response to conversation
+ self.conversation.add(role="Chairman", content=final_response)
+
if self.verbose:
print(f"{'='*80}")
             print("✅ FINAL RESPONSE")
print(f"{'='*80}\n")
-
- return {
- "query": query,
- "original_responses": original_responses,
- "evaluations": evaluations,
- "final_response": final_response,
- "anonymous_mapping": id_to_member,
- }
+ # Format and return output using history_output_formatter
+ return history_output_formatter(
+ conversation=self.conversation, type=self.output_type
+ )
diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py
index 768256e1..1d6e8762 100644
--- a/tests/structs/test_auto_swarms_builder.py
+++ b/tests/structs/test_auto_swarms_builder.py
@@ -168,7 +168,9 @@ def test_error_handling():
# Test with invalid agent configuration
print("Testing invalid agent configuration...")
try:
- swarm.create_agents_from_specs({"agents": [{"agent_name": ""}]})
+ swarm.create_agents_from_specs(
+ {"agents": [{"agent_name": ""}]}
+ )
print(
"ā Should have raised an error for empty agent configuration"
)
diff --git a/tests/structs/test_i_agent.py b/tests/structs/test_i_agent.py
index 3edf9a8e..1c1f95c5 100644
--- a/tests/structs/test_i_agent.py
+++ b/tests/structs/test_i_agent.py
@@ -1,5 +1,3 @@
-import pytest
-
from swarms.agents.i_agent import IterativeReflectiveExpansion
diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py
index e4a48a20..99dd73ae 100644
--- a/tests/structs/test_sequential_workflow.py
+++ b/tests/structs/test_sequential_workflow.py
@@ -3,7 +3,6 @@ import pytest
from swarms import Agent, SequentialWorkflow
-
def test_sequential_workflow_initialization_with_agents():
"""Test SequentialWorkflow initialization with agents"""
agent1 = Agent(