pull/845/head
Kye Gomez 4 weeks ago
parent c43180de14
commit 48e7fd8a79

@@ -0,0 +1,206 @@
# AI generates an initial response
# AI decides how many "thinking rounds" it needs
# For each round:
#   - Generates 3 alternative responses
#   - Evaluates all responses
#   - Picks the best one
# Final response is the survivor of this AI battle royale

from swarms import Agent
# OpenAI function schema for determining thinking rounds
thinking_rounds_schema = {
    "name": "determine_thinking_rounds",
    "description": "Determines the optimal number of thinking rounds needed for a task",
    "parameters": {
        "type": "object",
        "properties": {
            "num_rounds": {
                "type": "integer",
                "description": "The number of thinking rounds needed (1-5)",
                "minimum": 1,
                "maximum": 5,
            }
        },
        "required": ["num_rounds"],
    },
}
# System prompt for determining thinking rounds
THINKING_ROUNDS_PROMPT = """You are an expert at determining the optimal number of thinking rounds needed for complex tasks. Your role is to analyze the task and determine how many rounds of thinking and evaluation would be most beneficial.
Consider the following factors when determining the number of rounds:
1. Task Complexity: More complex tasks may require more rounds
2. Potential for Multiple Valid Approaches: Tasks with multiple valid solutions need more rounds
3. Risk of Error: Higher-stakes tasks may benefit from more rounds
4. Time Sensitivity: Balance thoroughness with efficiency
Guidelines for number of rounds:
- 1 round: Simple, straightforward tasks with clear solutions
- 2-3 rounds: Moderately complex tasks with some ambiguity
- 4-5 rounds: Highly complex tasks with multiple valid approaches or high-stakes decisions
Your response should be a single number between 1 and 5, representing the optimal number of thinking rounds needed."""
# Schema for generating alternative responses
alternative_responses_schema = {
    "name": "generate_alternatives",
    "description": "Generates multiple alternative responses to a task",
    "parameters": {
        "type": "object",
        "properties": {
            "alternatives": {
                "type": "array",
                "description": "List of alternative responses",
                "items": {
                    "type": "object",
                    "properties": {
                        "response": {
                            "type": "string",
                            "description": "The alternative response",
                        },
                        "reasoning": {
                            "type": "string",
                            "description": "Explanation of why this approach was chosen",
                        },
                    },
                    "required": ["response", "reasoning"],
                },
                "minItems": 3,
                "maxItems": 3,
            }
        },
        "required": ["alternatives"],
    },
}
# Schema for evaluating responses
evaluation_schema = {
    "name": "evaluate_responses",
    "description": "Evaluates and ranks alternative responses",
    "parameters": {
        "type": "object",
        "properties": {
            "evaluation": {
                "type": "object",
                "properties": {
                    "best_response": {
                        "type": "string",
                        "description": "The selected best response",
                    },
                    "ranking": {
                        "type": "array",
                        "description": "Ranked list of responses from best to worst",
                        "items": {
                            "type": "object",
                            "properties": {
                                "response": {
                                    "type": "string",
                                    "description": "The response",
                                },
                                "score": {
                                    "type": "number",
                                    "description": "Score from 0-100",
                                },
                                "reasoning": {
                                    "type": "string",
                                    "description": "Explanation of the score",
                                },
                            },
                            "required": ["response", "score", "reasoning"],
                        },
                    },
                },
                "required": ["best_response", "ranking"],
            }
        },
        "required": ["evaluation"],
    },
}
# System prompt for generating alternatives
ALTERNATIVES_PROMPT = """You are an expert at generating diverse and creative alternative responses to tasks. Your role is to generate 3 distinct approaches to solving the given task.
For each alternative:
1. Consider a different perspective or approach
2. Provide clear reasoning for why this approach might be effective
3. Ensure alternatives are meaningfully different from each other
4. Maintain high quality and relevance to the task
Your response should include 3 alternatives, each with its own reasoning."""
# System prompt for evaluation
EVALUATION_PROMPT = """You are an expert at evaluating and comparing different responses to tasks. Your role is to critically analyze each response and determine which is the most effective.
Consider the following criteria when evaluating:
1. Relevance to the task
2. Completeness of the solution
3. Creativity and innovation
4. Practicality and feasibility
5. Clarity and coherence
Your response should include:
1. The best response selected
2. A ranked list of all responses with scores and reasoning"""
class CortAgent:
    def __init__(
        self,
        alternative_responses: int = 3,
    ):
        # Stored for reference; note the schema above currently pins alternatives to 3
        self.alternative_responses = alternative_responses
        self.thinking_rounds = Agent(
            agent_name="CortAgent",
            agent_description="CortAgent is a multi-step agent that uses a battle royale approach to determine the best response to a task.",
            model_name="gpt-4o-mini",
            max_loops=1,
            dynamic_temperature_enabled=True,
            tools_list_dictionary=[thinking_rounds_schema],  # expects a list of schemas
            system_prompt=THINKING_ROUNDS_PROMPT,
        )
        self.alternatives_agent = Agent(
            agent_name="CortAgentAlternatives",
            agent_description="Generates multiple alternative responses to a task",
            model_name="gpt-4o-mini",
            max_loops=1,
            dynamic_temperature_enabled=True,
            tools_list_dictionary=[alternative_responses_schema],
            system_prompt=ALTERNATIVES_PROMPT,
        )
        self.evaluation_agent = Agent(
            agent_name="CortAgentEvaluation",
            agent_description="Evaluates and ranks alternative responses",
            model_name="gpt-4o-mini",
            max_loops=1,
            dynamic_temperature_enabled=True,
            tools_list_dictionary=[evaluation_schema],
            system_prompt=EVALUATION_PROMPT,
        )
    def run(self, task: str):
        # First determine the number of thinking rounds
        # (assumes the agent returns the parsed tool output, e.g. "3")
        num_rounds = int(self.thinking_rounds.run(task))

        # Initialize with the task
        current_task = task
        best_response = None

        # Run the battle royale for the determined number of rounds
        for round_num in range(num_rounds):
            # Generate alternatives
            alternatives = self.alternatives_agent.run(current_task)

            # Evaluate alternatives (assumes a dict matching evaluation_schema)
            evaluation = self.evaluation_agent.run(alternatives)

            # Update best response and current task for the next round
            best_response = evaluation["evaluation"]["best_response"]
            current_task = f"Previous best response: {best_response}\nOriginal task: {task}"

        return best_response
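
# Usage sketch (hypothetical task string; assumes the agents return parsed
# tool outputs as noted in run() above):
if __name__ == "__main__":
    agent = CortAgent()
    best = agent.run("Design a caching strategy for a read-heavy REST API")
    print(best)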

@@ -193,6 +193,7 @@ nav:
    - Documentation:
        - Agent Class Documentation: "swarms/structs/agent.md"
        - Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md"
+       - Integrating Various Models into Your Agents: "swarms/models/agent_and_models.md"
    - Tools:
        - Structured Outputs: "swarms/agents/structured_outputs.md"
        - Overview: "swarms/tools/main.md"

@@ -0,0 +1,196 @@
# Model Integration in Agents
!!! info "About Model Integration"
    Agents support multiple model providers through LiteLLM integration, allowing you to easily switch between language models. This document outlines the available providers and how to use them with agents.
## Important Note on Model Names
!!! warning "Required Format"
    When specifying a model in Swarms, you must use the format `provider/model_name`. For example:

    ```python
    "openai/gpt-4"
    "anthropic/claude-3-opus-latest"
    "cohere/command-r-plus"
    ```

    This format ensures Swarms knows which provider to use for the specified model.
## Available Model Providers
### OpenAI
??? info "OpenAI Models"
    - **Provider name**: `openai`
    - **Available Models**:
        - `gpt-4`
        - `gpt-3.5-turbo`
        - `gpt-4-turbo-preview`
### Anthropic
??? info "Anthropic Models"
    - **Provider name**: `anthropic`
    - **Available Models**:
        - **Claude 3 Opus**:
            - `claude-3-opus-latest`
            - `claude-3-opus-20240229`
        - **Claude 3 Sonnet**:
            - `claude-3-sonnet-20240229`
            - `claude-3-5-sonnet-latest`
            - `claude-3-5-sonnet-20240620`
            - `claude-3-7-sonnet-latest`
            - `claude-3-7-sonnet-20250219`
            - `claude-3-5-sonnet-20241022`
        - **Claude 3 Haiku**:
            - `claude-3-haiku-20240307`
            - `claude-3-5-haiku-20241022`
            - `claude-3-5-haiku-latest`
        - **Legacy Models**:
            - `claude-2`
            - `claude-2.1`
            - `claude-instant-1`
            - `claude-instant-1.2`
### Cohere
??? info "Cohere Models"
    - **Provider name**: `cohere`
    - **Available Models**:
        - **Command**:
            - `command`
            - `command-r`
            - `command-r-08-2024`
            - `command-r7b-12-2024`
        - **Command Light**:
            - `command-light`
        - **Command R Plus**:
            - `command-r-plus`
            - `command-r-plus-08-2024`
### Google
??? info "Google Models"
    - **Provider name**: `google`
    - **Available Models**:
        - `gemini-pro`
        - `gemini-pro-vision`
### Mistral
??? info "Mistral Models"
    - **Provider name**: `mistral`
    - **Available Models**:
        - `mistral-tiny`
        - `mistral-small`
        - `mistral-medium`
## Using Different Models with Swarms
To use a different model with your Swarms agent, specify the model name in the `model_name` parameter when initializing the Agent, using the `provider/model_name` format:
```python
from swarms.structs.agent import Agent
# Using OpenAI's GPT-4o
agent = Agent(
agent_name="Research-Agent",
model_name="openai/gpt-4o", # Note the provider/model_name format
# ... other parameters
)
# Using Anthropic's Claude
agent = Agent(
agent_name="Analysis-Agent",
model_name="anthropic/claude-3-sonnet-20240229", # Note the provider/model_name format
# ... other parameters
)
# Using Cohere's Command
agent = Agent(
agent_name="Text-Agent",
model_name="cohere/command-r-plus", # Note the provider/model_name format
# ... other parameters
)
```
## Model Configuration
When using different models, you can configure various parameters:
```python
agent = Agent(
agent_name="Custom-Agent",
model_name="openai/gpt-4",
temperature=0.7, # Controls randomness (0.0 to 1.0)
max_tokens=2000, # Maximum tokens in response
top_p=0.9, # Nucleus sampling parameter
frequency_penalty=0.0, # Reduces repetition
presence_penalty=0.0, # Encourages new topics
# ... other parameters
)
```
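!!! note
    Parameter support varies by provider: a setting such as `frequency_penalty` may be ignored or rejected by models that do not implement it, so check your provider's documentation before relying on it.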
## Best Practices
### Model Selection
!!! tip "Choosing the Right Model"
    - Choose models based on your specific use case
    - Consider cost, performance, and feature requirements
    - Test different models for your specific task
### Error Handling
!!! warning "Error Management"
    - Implement proper error handling for model-specific errors, as sketched below
    - Handle rate limits and API quotas appropriately (e.g., with retries and backoff)
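
A minimal retry sketch, assuming only that `agent.run()` raises on transient provider failures; `run_with_retries` is a hypothetical helper, and in practice you would narrow the caught exception to the error types your provider (or LiteLLM) raises:

```python
import time

def run_with_retries(agent, task: str, max_retries: int = 3):
    """Run an agent task, retrying with exponential backoff on transient errors."""
    for attempt in range(max_retries):
        try:
            return agent.run(task)
        except Exception as error:  # narrow to provider-specific errors in practice
            if attempt == max_retries - 1:
                raise  # out of retries; surface the error to the caller
            wait = 2 ** attempt  # 1s, 2s, 4s, ...
            print(f"Attempt {attempt + 1} failed ({error}); retrying in {wait}s")
            time.sleep(wait)
```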
### Cost Management
!!! note "Cost Considerations"
    - Monitor token usage and costs (see the token-count sketch below)
    - Use appropriate model sizes for your needs
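
One way to keep an eye on costs is to count tokens before sending a prompt. The sketch below uses the `tiktoken` library, which covers OpenAI tokenizers only (other providers tokenize differently); `estimate_tokens` is a hypothetical helper:

```python
import tiktoken

def estimate_tokens(text: str, model: str = "gpt-4") -> int:
    """Rough token count for budgeting a prompt before sending it."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))

prompt = "Summarize the quarterly report in three bullet points."
print(f"~{estimate_tokens(prompt)} prompt tokens")
```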
## Example Use Cases
### 1. Complex Analysis (GPT-4)
```python
agent = Agent(
agent_name="Analysis-Agent",
model_name="openai/gpt-4", # Note the provider/model_name format
temperature=0.3, # Lower temperature for more focused responses
max_tokens=4000
)
```
### 2. Creative Tasks (Claude)
```python
agent = Agent(
agent_name="Creative-Agent",
model_name="anthropic/claude-3-sonnet-20240229", # Note the provider/model_name format
temperature=0.8, # Higher temperature for more creative responses
max_tokens=2000
)
```
### 3. Vision Tasks (Gemini)
```python
agent = Agent(
agent_name="Vision-Agent",
model_name="google/gemini-pro-vision", # Note the provider/model_name format
temperature=0.4,
max_tokens=1000
)
```
## Troubleshooting
!!! warning "Common Issues"
    If you encounter issues with specific models:

    1. Verify your API keys are correctly set (see the environment check below)
    2. Check model availability in your region
    3. Ensure you have sufficient quota/credits
    4. Verify the model name is correct and supported
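
A quick sanity check for the first item, assuming the standard environment variable names that LiteLLM reads for each provider (adjust the list to the providers you actually use):

```python
import os

# Provider API key variables (names assumed; check your provider's docs)
for key in ("OPENAI_API_KEY", "ANTHROPIC_API_KEY", "COHERE_API_KEY",
            "GEMINI_API_KEY", "MISTRAL_API_KEY"):
    print(f"{key}: {'set' if os.getenv(key) else 'MISSING'}")
```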
## Additional Resources
- [LiteLLM Documentation](https://docs.litellm.ai/){target=_blank}
- [OpenAI API Documentation](https://platform.openai.com/docs/api-reference){target=_blank}
- [Anthropic API Documentation](https://docs.anthropic.com/claude/reference/getting-started-with-the-api){target=_blank}
- [Google AI Documentation](https://ai.google.dev/docs){target=_blank}

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
-version = "7.7.5"
+version = "7.7.6"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@@ -25,7 +25,6 @@ from swarms.structs.stopping_conditions import (
)
__all__ = [
-    # "ToolAgent",
    "check_done",
    "check_finished",
    "check_complete",
