parent 2aa5434599, commit 904f80d60b
@ -0,0 +1,166 @@

# ReasoningAgentRouter Documentation

The ReasoningAgentRouter is an agent routing system that dynamically selects and executes a reasoning strategy suited to each task. It provides a single, flexible interface to multiple reasoning approaches, including Reasoning Duo, Self-Consistency, and Iterative Reflective Expansion (IRE).

## Architecture

```mermaid
graph TD
    Task[Task Input] --> Router[ReasoningAgentRouter]
    Router --> SelectSwarm{Select Swarm Type}
    SelectSwarm -->|Reasoning Duo| RD[ReasoningDuo]
    SelectSwarm -->|Self Consistency| SC[SelfConsistencyAgent]
    SelectSwarm -->|IRE| IRE[IterativeReflectiveExpansion]
    RD --> Output[Task Output]
    SC --> Output
    IRE --> Output
```

## Class: ReasoningAgentRouter

### Arguments

| Argument | Type | Default | Description |
|----------|------|---------|-------------|
| agent_name | str | "reasoning_agent" | Name identifier for the agent |
| description | str | "A reasoning agent..." | Description of the agent's capabilities |
| model_name | str | "gpt-4o-mini" | The underlying language model to use |
| system_prompt | str | "You are a helpful..." | System prompt for the agent |
| max_loops | int | 1 | Maximum number of reasoning loops |
| swarm_type | agent_types | "reasoning-duo" | Type of reasoning swarm to use |
| num_samples | int | 1 | Number of samples for self-consistency |
| output_type | OutputType | "dict" | Format of the output |

### Methods

| Method | Description |
|--------|-------------|
| select_swarm() | Selects and initializes the appropriate reasoning swarm based on the specified type |
| run(task: str) | Executes the selected swarm's reasoning process on the given task |
| batched_run(tasks: List[str]) | Executes the reasoning process on a batch of tasks |

### Swarm Types

1. **ReasoningDuo**
   - Uses two agents working together
   - One for reasoning, one for execution
   - Best for tasks requiring both analysis and action

2. **SelfConsistencyAgent**
   - Generates multiple samples
   - Ensures consistency across reasoning paths
   - Ideal for tasks requiring high reliability

3. **IterativeReflectiveExpansion (IRE)**
   - Uses iterative refinement
   - Reflects on and improves reasoning paths
   - Best for complex problem-solving

## Usage Examples

### Basic Usage

```python
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Initialize the router
router = ReasoningAgentRouter(
    agent_name="reasoning-agent",
    description="A reasoning agent that can answer questions and help with tasks.",
    model_name="gpt-4o-mini",
    system_prompt="You are a helpful assistant that can answer questions and help with tasks.",
    max_loops=1,
    swarm_type="self-consistency",
    num_samples=1,
    output_type="list"
)

# Run a single task
result = router.run("What is the best approach to solve this problem?")
```

### Batch Processing

```python
# Process multiple tasks
tasks = [
    "What is the optimal solution for X?",
    "How should we approach problem Y?"
]
results = router.batched_run(tasks)
```

### Using Different Swarm Types

#### ReasoningDuo

```python
router = ReasoningAgentRouter(
    swarm_type="reasoning-duo",
    model_name="gpt-4o-mini"
)
```

#### Self-Consistency

```python
router = ReasoningAgentRouter(
    swarm_type="self-consistency",
    num_samples=3,
    model_name="gpt-4o-mini"
)
```

#### IRE

```python
router = ReasoningAgentRouter(
    swarm_type="ire",
    max_loops=5,
    model_name="gpt-4o-mini"
)
```

## Best Practices

1. **Swarm Type Selection**
   - Use ReasoningDuo for tasks requiring both analysis and action
   - Use SelfConsistency for tasks requiring high reliability
   - Use IRE for complex problem-solving requiring iterative refinement

2. **Performance Optimization**
   - Adjust max_loops based on task complexity
   - Increase num_samples for higher reliability
   - Choose an appropriate model_name for the task's requirements

3. **Output Handling**
   - Use the output_type that fits your needs
   - Process batched results appropriately (see the sketch after this list)
   - Handle errors gracefully

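For example, batched results line up one-to-one with their input tasks; a minimal sketch, reusing the `router` from Basic Usage above (with output_type="list", so each result is a list of conversation messages; with "dict", a conversation dictionary):

```python
# A minimal sketch of batched output handling; the exact shape of each
# result depends on the configured output_type.
tasks = [
    "What is the optimal solution for X?",
    "How should we approach problem Y?",
]

results = router.batched_run(tasks)

for task, result in zip(tasks, results):
    print(f"Task: {task}")
    print(f"Result: {result}")
```
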
## Error Handling

The ReasoningAgentRouter includes built-in error handling for:

- Invalid swarm types
- Model execution failures
- Task processing errors

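For instance, an unrecognized swarm_type makes select_swarm (invoked by run) raise a ValueError that callers can catch; a minimal sketch:

```python
# A minimal sketch of defensive usage. The ValueError is raised by
# select_swarm when run() executes; the broad except is a placeholder
# for provider-specific failures (rate limits, timeouts, etc.).
try:
    router = ReasoningAgentRouter(swarm_type="unknown-type")
    result = router.run("Analyze this problem.")
except ValueError as e:
    print(f"Configuration error: {e}")
except Exception as e:
    print(f"Execution error: {e}")
```
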
## Limitations

1. Processing time increases with:
   - Higher num_samples
   - Larger max_loops
   - More complex tasks

2. Model-specific limitations based on:
   - Token limits
   - Model capabilities
   - API rate limits

## Contributing

When extending the ReasoningAgentRouter (a sketch of the required interface follows this list):

1. Follow the existing swarm interface
2. Add comprehensive tests
3. Update documentation
4. Maintain error handling

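As an illustration, a new strategy needs only a run(task) entry point plus a branch in select_swarm; a hypothetical sketch (MyReasoningAgent and the "my-reasoning-agent" type are illustrative placeholders, not shipped classes):

```python
# Hypothetical sketch: MyReasoningAgent is a placeholder showing the
# interface a new swarm must expose; it is not part of swarms.
class MyReasoningAgent:
    def __init__(self, agent_name: str, model_name: str):
        self.agent_name = agent_name
        self.model_name = model_name

    def run(self, task: str):
        # Implement the strategy's reasoning loop here.
        ...


# A corresponding branch would then be added in
# ReasoningAgentRouter.select_swarm:
#
#     elif self.swarm_type == "my-reasoning-agent":
#         return MyReasoningAgent(
#             agent_name=self.agent_name,
#             model_name=self.model_name,
#         )
```
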
@ -0,0 +1,24 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter


reasoning_agent_router = ReasoningAgentRouter(
    agent_name="reasoning-agent",
    description="A reasoning agent that can answer questions and help with tasks.",
    model_name="gpt-4o-mini",
    system_prompt="You are a helpful assistant that can answer questions and help with tasks.",
    max_loops=1,
    swarm_type="self-consistency",
    num_samples=1,
    output_type="list",
)

reasoning_agent_router.run(
    "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf."
)


# reasoning_agent_router.batched_run(
#     [
#         "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf.",
#         "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf.",
#     ]
# )
@ -0,0 +1,17 @@
from swarms.agents.reasoning_duo import ReasoningDuo


reasoning_duo = ReasoningDuo(
    system_prompt="You are a helpful assistant that can answer questions and help with tasks.",
    model_names=["gpt-4o-mini", "gpt-4o-mini"],
)

reasoning_duo.run(
    "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf."
)

reasoning_duo.batched_run(
    [
        "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf.",
        "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf.",
    ]
)
@ -0,0 +1,130 @@
from typing import List, Literal

from swarms.agents.consistency_agent import SelfConsistencyAgent
from swarms.agents.i_agent import (
    IterativeReflectiveExpansion as IREAgent,
)
from swarms.agents.reasoning_duo import ReasoningDuo
from swarms.structs.output_types import OutputType

agent_types = Literal[
    "reasoning-duo",
    "self-consistency",
    "ire",
    "reasoning-agent",
    "consistency-agent",
    "ire-agent",
]


class ReasoningAgentRouter:
    """
    A Reasoning Agent that can answer questions and assist with various tasks using different reasoning strategies.

    Attributes:
        agent_name (str): The name of the agent.
        description (str): A brief description of the agent's capabilities.
        model_name (str): The name of the model used for reasoning.
        system_prompt (str): The prompt that guides the agent's reasoning process.
        max_loops (int): The maximum number of loops for the reasoning process.
        swarm_type (agent_types): The type of reasoning swarm to use (e.g., reasoning duo, self-consistency, IRE).
        num_samples (int): The number of samples to generate for self-consistency agents.
        output_type (OutputType): The format of the output (e.g., dict, list).
    """

    def __init__(
        self,
        agent_name: str = "reasoning_agent",
        description: str = "A reasoning agent that can answer questions and help with tasks.",
        model_name: str = "gpt-4o-mini",
        system_prompt: str = "You are a helpful assistant that can answer questions and help with tasks.",
        max_loops: int = 1,
        # The default must be one of the agent_types literals above;
        # select_swarm() raises ValueError for any other value.
        swarm_type: agent_types = "reasoning-duo",
        num_samples: int = 1,
        output_type: OutputType = "dict",
    ):
        self.agent_name = agent_name
        self.description = description
        self.model_name = model_name
        self.system_prompt = system_prompt
        self.max_loops = max_loops
        self.swarm_type = swarm_type
        self.num_samples = num_samples
        self.output_type = output_type

    def select_swarm(self):
        """
        Selects and initializes the appropriate reasoning swarm based on the specified swarm type.

        Returns:
            An instance of the selected reasoning swarm.
        """
        if self.swarm_type in ("reasoning-duo", "reasoning-agent"):
            return ReasoningDuo(
                agent_name=self.agent_name,
                agent_description=self.description,
                model_name=[self.model_name, self.model_name],
                system_prompt=self.system_prompt,
                output_type=self.output_type,
            )

        elif self.swarm_type in ("self-consistency", "consistency-agent"):
            return SelfConsistencyAgent(
                agent_name=self.agent_name,
                description=self.description,
                model_name=self.model_name,
                system_prompt=self.system_prompt,
                max_loops=self.max_loops,
                num_samples=self.num_samples,
                output_type=self.output_type,
            )

        elif self.swarm_type in ("ire", "ire-agent"):
            return IREAgent(
                agent_name=self.agent_name,
                description=self.description,
                model_name=self.model_name,
                system_prompt=self.system_prompt,
                max_loops=self.max_loops,
                max_iterations=self.num_samples,
                output_type=self.output_type,
            )

        else:
            raise ValueError(f"Invalid swarm type: {self.swarm_type}")

    def run(self, task: str, *args, **kwargs):
        """
        Executes the selected swarm's reasoning process on the given task.

        Args:
            task (str): The task or question to be processed by the reasoning agent.

        Returns:
            The result of the reasoning process.
        """
        swarm = self.select_swarm()
        # Forward any extra arguments to the underlying swarm's run method.
        return swarm.run(task, *args, **kwargs)

    def batched_run(self, tasks: List[str], *args, **kwargs):
        """
        Executes the reasoning process on a batch of tasks.

        Args:
            tasks (List[str]): A list of tasks to be processed.

        Returns:
            List of results from the reasoning process for each task.
        """
        results = []
        for task in tasks:
            results.append(self.run(task, *args, **kwargs))
        return results
@ -0,0 +1,273 @@
from typing import List, Dict, Any, Optional
import time
from loguru import logger
from swarms.structs.agent import Agent

# Prompt templates for different agent roles
GENERATOR_PROMPT = """You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics.

Your responsibilities:
1. Provide accurate information based on your training data
2. Use clear, concise language
3. Acknowledge limitations in your knowledge
4. Abstain from making up information when uncertain

When responding to queries:
- Stick to verified facts
- Cite your sources when possible
- Clearly distinguish between firmly established facts and more tentative claims
- Use phrases like "I'm not certain about..." or "Based on my knowledge up to my training cutoff..." when appropriate
- Avoid overly confident language for uncertain topics

Remember, it's better to acknowledge ignorance than to provide incorrect information.
"""

CRITIC_PROMPT = """You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text.

Your responsibilities:
1. Carefully analyze the provided text for factual errors
2. Identify claims that lack sufficient evidence
3. Spot logical inconsistencies
4. Flag overly confident language on uncertain topics
5. Detect potentially hallucinated details (names, dates, statistics, etc.)

For each issue detected, you should:
- Quote the specific problematic text
- Explain why it's potentially inaccurate
- Rate the severity of the issue (low/medium/high)
- Suggest a specific correction or improvement

Focus particularly on:
- Unfounded claims presented as facts
- Highly specific details that seem suspicious
- Logical contradictions
- Anachronisms or temporal inconsistencies
- Claims that contradict common knowledge

Be thorough and specific in your critique. Provide actionable feedback for improvement.
"""

REFINER_PROMPT = """You are a refinement specialist tasked with improving text based on critical feedback.

Your responsibilities:
1. Carefully review the original text and the critical feedback
2. Make precise modifications to address all identified issues
3. Ensure factual accuracy in the refined version
4. Maintain the intended tone and style of the original
5. Add appropriate epistemic status markers (e.g., "likely", "possibly", "according to...")

Guidelines for refinement:
- Remove or qualify unsupported claims
- Replace specific details with more general statements when evidence is lacking
- Add appropriate hedging language where certainty is not warranted
- Maintain the helpful intent of the original response
- Ensure logical consistency throughout the refined text
- Add qualifiers or clarify knowledge limitations where appropriate

The refined text should be helpful and informative while being scrupulously accurate.
"""

VALIDATOR_PROMPT = """You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs.

Your responsibilities:
1. Verify that all critical issues from previous feedback have been properly addressed
2. Check for any remaining factual inaccuracies or unsupported claims
3. Ensure appropriate epistemic status markers are used
4. Confirm the response maintains a helpful tone while being accurate
5. Provide a final assessment of the response quality

Assessment structure:
- Issue resolution: Have all previously identified issues been addressed? (Yes/No/Partially)
- Remaining concerns: Are there any remaining factual or logical issues? (List if any)
- Epistemics: Does the response appropriately indicate confidence levels? (Yes/No/Needs improvement)
- Helpfulness: Does the response remain helpful despite necessary qualifications? (Yes/No/Partially)
- Overall assessment: Final verdict on whether the response is ready for user consumption (Approved/Needs further refinement)

If approved, explain what makes this response trustworthy. If further refinement is needed, provide specific guidance.
"""


class DeHallucinationSwarm:
    """
    A system of multiple agents that work together to reduce hallucinations in generated content.
    The system works through multiple rounds of generation, criticism, refinement, and validation.
    """

    def __init__(
        self,
        name: str = "DeHallucinationSwarm",
        description: str = "A system of multiple agents that work together to reduce hallucinations in generated content.",
        model_names: List[str] = [
            "gpt-4o-mini",
            "gpt-4o-mini",
            "gpt-4o-mini",
            "gpt-4o-mini",
        ],
        iterations: int = 2,
        system_prompt: str = GENERATOR_PROMPT,
        store_intermediate_results: bool = True,
    ):
        """
        Initialize the DeHallucinationSwarm with configurable agents.

        Args:
            name: Name of the swarm
            description: Description of the swarm's purpose
            model_names: List of model names for generator, critic, refiner, and validator
            iterations: Number of criticism-refinement cycles to perform
            system_prompt: System prompt stored on the swarm (the generator prompt by default)
            store_intermediate_results: Whether to store all intermediate outputs
        """
        self.name = name
        self.description = description
        self.iterations = iterations
        self.store_intermediate_results = store_intermediate_results
        self.system_prompt = system_prompt
        self.history = []

        # Initialize all agents
        self.generator = Agent(
            agent_name="Generator",
            description="An agent that generates initial responses to queries",
            system_prompt=GENERATOR_PROMPT,
            model_name=model_names[0],
        )

        self.critic = Agent(
            agent_name="Critic",
            description="An agent that critiques responses for potential inaccuracies",
            system_prompt=CRITIC_PROMPT,
            model_name=model_names[1],
        )

        self.refiner = Agent(
            agent_name="Refiner",
            description="An agent that refines responses based on critique",
            system_prompt=REFINER_PROMPT,
            model_name=model_names[2],
        )

        self.validator = Agent(
            agent_name="Validator",
            description="An agent that performs final validation of refined content",
            system_prompt=VALIDATOR_PROMPT,
            model_name=model_names[3],
        )

    def _log_step(
        self,
        step_name: str,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
    ):
        """Record a step in the swarm's processing history."""
        if self.store_intermediate_results:
            timestamp = time.time()
            step_record = {
                "timestamp": timestamp,
                "step": step_name,
                "content": content,
            }
            if metadata:
                step_record["metadata"] = metadata

            self.history.append(step_record)
            logger.debug(f"Logged step: {step_name}")

    def run(self, query: str) -> Dict[str, Any]:
        """
        Process a query through the swarm's multi-agent refinement cycle.

        Args:
            query: The user's query to process

        Returns:
            Dict containing the final response and processing metadata
        """
        logger.info(f"Processing query: {query}")
        self.history = []  # Reset history for new query

        # Generate initial response
        initial_response = self.generator.run(query)
        self._log_step(
            "initial_generation", initial_response, {"query": query}
        )

        current_response = initial_response

        # Perform multiple iteration cycles
        for i in range(self.iterations):
            logger.info(f"Starting iteration {i+1}/{self.iterations}")

            # Step 1: Critique the current response
            critique = self.critic.run(
                f"Review the following response to the query: '{query}'\n\n{current_response}"
            )
            self._log_step(f"critique_{i+1}", critique)

            # Step 2: Refine based on critique
            refined_response = self.refiner.run(
                f"Refine the following response based on the critique provided.\n\n"
                f"Original query: {query}\n\n"
                f"Original response: {current_response}\n\n"
                f"Critique: {critique}"
            )
            self._log_step(f"refinement_{i+1}", refined_response)

            # Update current response for next iteration
            current_response = refined_response

        # Final validation
        validation = self.validator.run(
            f"Validate the following refined response for accuracy and helpfulness.\n\n"
            f"Original query: {query}\n\n"
            f"Final response: {current_response}"
        )
        self._log_step("final_validation", validation)

        # Prepare results
        result = {
            "query": query,
            "final_response": current_response,
            "validation_result": validation,
            "iteration_count": self.iterations,
        }

        if self.store_intermediate_results:
            result["processing_history"] = self.history

        return result

    def batch_run(self, queries: List[str]) -> List[Dict[str, Any]]:
        """
        Process multiple queries through the swarm.

        Args:
            queries: List of user queries to process

        Returns:
            List of result dictionaries, one per query
        """
        results = []
        for query in queries:
            logger.info(f"Processing batch query: {query}")
            results.append(self.run(query))
        return results


# # Example usage
# if __name__ == "__main__":
#     # Configure logger
#     logger.add("dehallucinationswarm.log", rotation="10 MB")

#     # Create swarm instance
#     swarm = DeHallucinationSwarm(iterations=2)

#     # Example queries that might tempt hallucination
#     test_queries = [
#         "Tell me about the history of quantum computing",
#         "What are the specific details of the Treaty of Utrecht?",
#         "Who won the Nobel Prize in Physics in 2020?",
#         "What are the main causes of the economic recession of 2008?",
#     ]

#     # Process batch of queries
#     results = swarm.batch_run(test_queries)
#     print(results)
@ -0,0 +1,14 @@
from swarms.structs.conversation import Conversation


def history_output_formatter(
    conversation: Conversation, type: str = "list"
):
    """Format a conversation's history as a list, dict, or string."""
    if type == "list":
        return conversation.return_messages_as_list()
    elif type == "dict":
        return conversation.to_dict()
    elif type == "string" or type == "str":
        return conversation.get_str()
    else:
        raise ValueError(f"Invalid type: {type}")
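

# A usage sketch (an assumption for illustration, not part of this
# file): the Conversation methods dispatched above determine the shape
# of each return value.
#
# conversation = Conversation()
# history_output_formatter(conversation, type="dict")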