pull/634/head
Your Name 2 months ago
parent 5ee98b6b3a
commit 8e374b15be

@ -0,0 +1,68 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.agents_available import showcase_available_agents
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the Claims Director agent
director_agent = Agent(
agent_name="ClaimsDirector",
agent_description="Oversees and coordinates the medical insurance claims processing workflow",
system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
and accurately while maintaining compliance with insurance policies and regulations.""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="director_agent.json",
)
# Initialize Claims Processor agent
processor_agent = Agent(
agent_name="ClaimsProcessor",
agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="processor_agent.json",
)
# Initialize Claims Auditor agent
auditor_agent = Agent(
agent_name="ClaimsAuditor",
agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="auditor_agent.json",
)
# Create a list of agents
agents = [director_agent, processor_agent, auditor_agent]
print(showcase_available_agents(agents=agents))
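# A sketch of one possible next step (an assumption, not part of this file): wire the
# three agents into an actual claims-processing run using the AgentRearrange flow
# pattern demonstrated elsewhere in this PR. The flow string and task text are illustrative.
from swarms import AgentRearrange

claims_swarm = AgentRearrange(
    agents=agents,
    flow="ClaimsDirector -> ClaimsProcessor -> ClaimsAuditor",
    max_loops=1,
)
print(claims_swarm.run("Review, process, and audit this batch of outpatient claims."))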

@ -109,7 +109,6 @@ router = SwarmRouter(
swarm_type="SequentialWorkflow", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
auto_generate_prompts=True,
output_type="all",
)
# Example usage
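# A minimal sketch of invoking the router (assumes the agents and task defined
# earlier in this file; SwarmRouter.run() returns the consolidated output for the
# configured output_type):
#
#   result = router.run("Conduct a comprehensive analysis of the provided documents")
#   print(result)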

@ -0,0 +1,99 @@
import os
from swarm_models import OpenAIChat
from swarms import Agent, run_agents_with_tasks_concurrently
# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize agents for different roles
delaware_ccorp_agent = Agent(
agent_name="Delaware-CCorp-Hiring-Agent",
system_prompt="""
Create a comprehensive hiring description for a Delaware C Corporation,
including all relevant laws and regulations, such as the Delaware General
Corporation Law (DGCL). Ensure the description
covers the requirements for hiring employees, contractors, and officers,
including the necessary paperwork, tax obligations, and benefits. Also,
outline the procedures for compliance with Delaware's employment laws,
including anti-discrimination laws, workers' compensation, and unemployment
insurance. Provide guidance on how to navigate the complexities of Delaware's
corporate law and ensure that all hiring practices are in compliance with
state and federal regulations.
""",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
output_type="str",
artifacts_on=True,
artifacts_output_path="delaware_ccorp_hiring_description.md",
artifacts_file_extension=".md",
)
indian_foreign_agent = Agent(
agent_name="Indian-Foreign-Hiring-Agent",
system_prompt="""
Create a comprehensive hiring description for an Indian or foreign country,
including all relevant laws and regulations, such as the Indian Contract Act,
the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
Ensure the description covers the requirements for hiring employees,
contractors, and officers, including the necessary paperwork, tax obligations,
and benefits. Also, outline the procedures for compliance with Indian and
foreign employment laws, including anti-discrimination laws, workers'
compensation, and unemployment insurance. Provide guidance on how to navigate
the complexities of Indian and foreign corporate law and ensure that all hiring
practices are in compliance with state and federal regulations. Consider the
implications of hiring foreign nationals and the requirements for obtaining
necessary visas and work permits.
""",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
output_type="str",
artifacts_on=True,
artifacts_output_path="indian_foreign_hiring_description.md",
artifacts_file_extension=".md",
)
# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
"""
Create a comprehensive hiring description for an Agent Engineer, including
required skills and responsibilities. Ensure the description covers the
necessary technical expertise, such as proficiency in AI/ML frameworks,
programming languages, and data structures. Outline the key responsibilities,
including designing and developing AI agents, integrating with existing systems,
and ensuring scalability and performance.
""",
"""
Generate a detailed job description for a Prompt Engineer, including
required skills and responsibilities. Ensure the description covers the
necessary technical expertise, such as proficiency in natural language processing,
machine learning, and software development. Outline the key responsibilities,
including designing and optimizing prompts for AI systems, ensuring prompt
quality and consistency, and collaborating with cross-functional teams.
""",
]
# Run agents with tasks concurrently
results = run_agents_with_tasks_concurrently(
agents,
tasks,
all_cores=True,
device="cpu",
)
# Print the results
for result in results:
print(result)

@ -0,0 +1,59 @@
# Swarms 6.0.0 - Performance & Reliability Update 🚀
We're excited to announce the release of Swarms 6.0.0, bringing significant improvements to performance, reliability, and developer experience. This release focuses on streamlining core functionalities while enhancing the overall stability of the framework.
## 📦 Installation
```bash
pip3 install -U swarms
```
## 🌟 Highlights
### Agent Enhancements
- **Improved RAG Performance**: Significant improvements to Retrieval-Augmented Generation capabilities
- **Enhanced Prompt Generation**: Auto-generated prompts now incorporate the agent's name, description, and system prompt for more contextual interactions
- **Streamlined Architecture**: Cleaned up unused code for better performance and maintainability
- **Simplified State Management**: Consolidated state management methods into a single `load()` function
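A minimal sketch of the agent-side changes above, assuming the `auto_generate_prompt` flag and the consolidated `load()` method exposed on `Agent` (parameter names are taken from examples elsewhere in this release; the `load()` argument is an assumption, not a confirmed signature):

```python
import os

from swarms import Agent
from swarm_models import OpenAIChat

model = OpenAIChat(api_key=os.getenv("OPENAI_API_KEY"), model_name="gpt-4o-mini")

agent = Agent(
    agent_name="Research-Agent",
    agent_description="Gathers and summarizes background material",
    llm=model,
    auto_generate_prompt=True,  # prompt assembled from name, description, and system prompt
    saved_state_path="research_agent.json",
)

# State management is now a single call instead of several load_* variants
agent.load("research_agent.json")  # path argument shown here is an assumption
```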
### Tools & Execution
- **Optimized Environment Management**: Fixed multiple environment instantiation issue
- Environments now initialize once during `__init__`
- **New SwarmRouter Function**: Simplified routing mechanism
- Returns consolidated string output from all agents
- Improved coordination between swarm components
## 💪 Performance Improvements
- Faster execution times
- Reduced memory footprint
- More reliable logging system
- Lightweight and efficient codebase
## 🤝 Join Our Community
### We're Hiring!
Join our growing team! We're currently looking for:
- Agent Engineers
- Developer Relations
- Infrastructure Engineers
- And more!
### Get Involved
- ⭐ Star our repository
- 🔄 Fork the project
- 🛠 Submit pull requests
- 🐛 Report issues
- 💡 Share your ideas
### Contact & Support
- 📧 Email: kye@swarms.world
- 🔗 Issues: [GitHub Issues](https://github.com/kyegomez/swarms/issues)
## 🔜 What's Next?
Have ideas for features, bug fixes, or improvements? We'd love to hear from you! Reach out through our GitHub issues or email us directly.
---
*Thank you to all our contributors and users who make Swarms better every day. Together, we're building the future of swarm intelligence.*
#SwarmAI #OpenSource #AI #MachineLearning

@ -1,238 +1,231 @@
# GroupChat Class Documentation
The `GroupChat` class is designed to manage a group chat session involving multiple agents. This class handles initializing the conversation, selecting the next speaker, resetting the chat, and executing the chat rounds, providing a structured approach to managing a dynamic and interactive conversation.
### Key Concepts
The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.
- **Agents**: Entities participating in the group chat.
- **Conversation Management**: Handling the flow of conversation, selecting speakers, and maintaining chat history.
- **Round-based Execution**: Managing the chat in predefined rounds.

## Installation

```bash
pip install swarms python-dotenv pydantic
```
## Arguments
| Argument | Type | Default | Description |
|---------------------|----------------------|-------------|-------------|
| `agents` | `List[Agent]` | `None` | List of agents participating in the group chat. |
| `max_rounds` | `int` | `10` | Maximum number of chat rounds. |
| `admin_name` | `str` | `"Admin"` | Name of the admin user. |
| `group_objective` | `str` | `None` | Objective of the group chat. |
| `selector_agent` | `Agent` | `None` | Agent responsible for selecting the next speaker. |
| `rules` | `str` | `None` | Rules for the group chat. |
| `*args` | | | Variable length argument list. |
| `**kwargs` | | | Arbitrary keyword arguments. |
## Attributes

| Attribute | Type | Description |
|---------------------|----------------------|-------------|
| `agents` | `List[Agent]` | List of agents participating in the group chat. |
| `wrapped_agents` | `List[AgentWrapper]` | List of wrapped agent instances. |
| `max_rounds` | `int` | Maximum number of chat rounds. |
| `admin_name` | `str` | Name of the admin user. |
| `group_objective` | `str` | Objective of the group chat. |
| `selector_agent` | `AgentWrapper` | Agent responsible for selecting the next speaker. |
| `messages` | `Conversation` | Conversation object for storing the chat messages. |
| `state_path` | `str` | Path for saving/loading chat state. |
| `state` | `GroupChatState` | Current state of the group chat. |
## Methods

### Core Methods

```python
def run(self, task: str) -> str:
    """Execute the group chat conversation"""

def save_state(self) -> None:
    """Save current state to disk"""

@classmethod
def load_state(cls, state_path: str) -> 'GroupChat':
    """Load GroupChat from saved state"""

def get_conversation_summary(self) -> Dict[str, Any]:
    """Return a summary of the conversation"""

def export_conversation(self, format: str = "json") -> Union[str, Dict]:
    """Export the conversation in specified format"""
```

### Internal Methods

```python
def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
    """Log a single interaction"""

def _add_message(self, role: str, content: str) -> None:
    """Add a message to the conversation history"""

def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
    """Select the next speaker using the selector agent"""
```

### Method Reference

| Method | Returns | Description |
|--------|---------|-------------|
| `__init__(agents, max_rounds, admin_name, ...)` | `None` | Initializes the group chat with the given parameters. |
| `agent_names` | `List[str]` | Returns the names of the agents in the group chat. |
| `reset()` | `None` | Resets the group chat by clearing the message history. |
| `agent_by_name(name)` | `Agent` | Finds an agent whose name is contained within the given name string; raises `ValueError` if no match is found. |
| `next_agent(agent)` | `Agent` | Returns the next agent in the list. |
| `select_speaker_msg()` | `str` | Returns the prompt message for selecting the next speaker. |
| `select_speaker(last_speaker_agent, selector_agent)` | `Agent` | Selects the next speaker. |
| `_participant_roles()` | `str` | Returns the roles of the participants. |
| `__call__(task)` | `str` | Executes the group chat as a function and returns the reply from the last speaker. |
## Usage Examples

### 1. Basic Setup with Two Agents

```python
import os

from swarms import Agent, GroupChat
from swarm_models import OpenAIChat

# Initialize OpenAI
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4o-mini")

# Create agents
analyst = Agent(
    agent_name="Financial-Analyst",
    system_prompt="You are a financial analyst...",
    llm=model
)

advisor = Agent(
    agent_name="Investment-Advisor",
    system_prompt="You are an investment advisor...",
    llm=model
)

# Create group chat
chat = GroupChat(
    name="Investment Team",
    agents=[analyst, advisor],
    max_rounds=5,
    group_objective="Provide investment advice"
)

response = chat.run("What's the best investment strategy for retirement?")
```
### 2. Advanced Setup with State Management

```python
# Create group chat with state persistence
chat = GroupChat(
    name="Investment Advisory Team",
    description="Expert team for financial planning",
    agents=[analyst, advisor, tax_specialist],
    max_rounds=10,
    admin_name="Senior Advisor",
    group_objective="Provide comprehensive financial planning",
    state_path="investment_chat_state.json",
    rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
)

# Run chat and save state
response = chat.run("Create a retirement plan for a 35-year-old")
chat.save_state()

# Load existing chat state
loaded_chat = GroupChat.load_state("investment_chat_state.json")
```
### 3. Using Custom Callable Agents

```python
def custom_agent(input_text: str) -> str:
    # Custom logic here
    return f"Processed: {input_text}"

# Mix of regular agents and callable functions
chat = GroupChat(
    name="Hybrid Team",
    agents=[analyst, custom_agent],
    max_rounds=3
)
```
### 4. Export and Analysis

```python
# Run chat
chat.run("Analyze market conditions")

# Get summary
summary = chat.get_conversation_summary()
print(summary)

# Export in different formats
json_conv = chat.export_conversation(format="json")
text_conv = chat.export_conversation(format="text")
```
### 5. Advanced Configuration with Custom Selector

```python
class CustomSelector(Agent):
    def run(self, input_text: str) -> str:
        # Custom selection logic
        return "Financial-Analyst"

chat = GroupChat(
    name="Custom Selection Team",
    agents=[analyst, advisor],
    selector_agent=CustomSelector(
        agent_name="Custom-Selector",
        system_prompt="Select the next speaker based on expertise",
        llm=model
    ),
    max_rounds=5
)
```
### 6. Debugging Setup

```python
import logging

# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

chat = GroupChat(
    name="Debug Team",
    agents=[analyst, advisor],
    max_rounds=3,
    state_path="debug_chat.json"
)

# Run with detailed logging
try:
    response = chat.run("Complex query")
except Exception as e:
    logger.error(f"Chat failed: {str(e)}")
    # Access last successful state
    state = chat.state
```
## Error Handling

The GroupChat class includes comprehensive error handling:

```python
try:
    chat = GroupChat(agents=[analyst])  # Will raise ValueError
except ValueError as e:
    print("Configuration error:", str(e))

try:
    response = chat.run("Query")
except Exception as e:
    # Access error state
    error_summary = chat.get_conversation_summary()
    print("Execution error:", str(e))
    print("State at error:", error_summary)
```
## Best Practices

1. **State Management**:
   - Always specify a `state_path` for important conversations
   - Use `save_state()` after critical operations
   - Implement regular state backups for long conversations

2. **Agent Configuration**:
   - Provide clear system prompts for each agent
   - Use descriptive agent names
   - Consider agent expertise when setting the group objective

3. **Performance**:
   - Keep `max_rounds` reasonable (5-10 for most cases)
   - Use early stopping conditions when possible
   - Monitor conversation length and complexity

4. **Error Handling**:
   - Always wrap chat execution in try-except blocks
   - Implement proper logging
   - Save states before potentially risky operations
## Limitations

- Agents must either have a `run` method or be callable
- State files can grow large with many interactions
- Selector agent may need optimization for large agent groups
- Real-time streaming not supported in basic configuration

## Summary

The `GroupChat` class offers a structured approach to managing a group chat involving multiple agents. With functionalities for initializing conversations, selecting speakers, and handling chat rounds, it provides a robust framework for dynamic and interactive discussions. This makes it an essential tool for applications requiring coordinated communication among multiple agents.

@ -0,0 +1,113 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
# Custom system prompt for VC legal document generation
VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:
1. Always include standard legal disclaimers
2. Follow standard VC document structures
3. Flag areas that need attorney review
4. Request necessary information for document completion
5. Maintain consistency across related documents
6. Output <DONE> only when document is complete and verified
Remember: All output should be marked as 'DRAFT' and require professional legal review."""
def create_vc_legal_agent():
load_dotenv()
# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Configure the model with appropriate parameters for legal work
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize the persistent agent
agent = Agent(
agent_name="VC-Legal-Document-Agent",
system_prompt=VC_LEGAL_AGENT_PROMPT,
llm=model,
max_loops="auto", # Allows multiple iterations until completion
stopping_token="<DONE>", # Agent will continue until this token is output
autosave=True,
dashboard=True, # Enable dashboard for monitoring
verbose=True,
dynamic_temperature_enabled=False, # Disable for consistency in legal documents
saved_state_path="vc_legal_agent_state.json",
user_name="legal_corp",
retry_attempts=3,
context_length=200000,
return_step_meta=True,
output_type="string",
streaming_on=False,
)
return agent
def generate_legal_document(agent, document_type, parameters):
"""
Generate a legal document with multiple refinement iterations
Args:
agent: The initialized VC legal agent
document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
parameters: Dict containing necessary parameters for the document
Returns:
str: The generated document content
"""
prompt = f"""
Generate a {document_type} with the following parameters:
{parameters}
Please follow these steps:
1. Create initial draft
2. Review for completeness
3. Add necessary legal disclaimers
4. Verify all required sections
5. Output <DONE> when complete
Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
"""
return agent.run(prompt)
# Example usage
if __name__ == "__main__":
# Initialize the agent
legal_agent = create_vc_legal_agent()
# Example parameters for a term sheet
parameters = {
"company_name": "TechStartup Inc.",
"investment_amount": "$5,000,000",
"valuation": "$20,000,000",
"investor_rights": [
"Board seat",
"Pro-rata rights",
"Information rights",
],
"type_of_security": "Series A Preferred Stock",
}
# Generate a term sheet
document = generate_legal_document(
legal_agent, "term_sheet", parameters
)
# Save the generated document
with open("generated_term_sheet_draft.md", "w") as f:
f.write(document)

@ -1,44 +0,0 @@
import os
from swarms_memory import ChromaDB
from swarms import Agent
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
# Initialize the ChromaDB client
chromadb = ChromaDB(
metric="cosine",
output_dir="finance_agent_rag",
# docs_folder="artifacts", # Folder of your documents
)
# Model
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
agent_description="Financial analysis agent",
llm=model,
max_loops="auto",
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=3,
context_length=200000,
long_term_memory=chromadb,
)
agent.run(
"What are the components of a startups stock incentive equity plan"
)

@ -1,117 +0,0 @@
from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
import subprocess
import os
# Making an instance of the ChromaDB class
memory = ChromaDB(
metric="cosine",
n_results=3,
output_dir="results",
docs_folder="docs",
)
# Model
model = OpenAIChat(
api_key=os.getenv("OPENAI_API_KEY"),
model_name="gpt-4o-mini",
temperature=0.1,
)
# Tools in swarms are simple python functions and docstrings
def terminal(
code: str,
):
"""
Run code in the terminal.
Args:
code (str): The code to run in the terminal.
Returns:
str: The output of the code.
"""
out = subprocess.run(
code, shell=True, capture_output=True, text=True
).stdout
return str(out)
def browser(query: str):
"""
Search the query in the browser with the `browser` tool.
Args:
query (str): The query to search in the browser.
Returns:
str: The search results.
"""
import webbrowser
url = f"https://www.google.com/search?q={query}"
webbrowser.open(url)
return f"Searching for {query} in the browser."
def create_file(file_path: str, content: str):
"""
Create a file using the file editor tool.
Args:
file_path (str): The path to the file.
content (str): The content to write to the file.
Returns:
str: The result of the file creation operation.
"""
with open(file_path, "w") as file:
file.write(content)
return f"File {file_path} created successfully."
def file_editor(file_path: str, mode: str, content: str):
"""
Edit a file using the file editor tool.
Args:
file_path (str): The path to the file.
mode (str): The mode to open the file in.
content (str): The content to write to the file.
Returns:
str: The result of the file editing operation.
"""
with open(file_path, mode) as file:
file.write(content)
return f"File {file_path} edited successfully."
# Agent
agent = Agent(
agent_name="Devin",
system_prompt=(
"Autonomous agent that can interact with humans and other"
" agents. Be Helpful and Kind. Use the tools provided to"
" assist the user. Return all code in markdown format."
),
llm=model,
max_loops="auto",
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
interactive=True,
tools=[terminal, browser, file_editor, create_file],
streaming=True,
long_term_memory=memory,
)
# Run the agent
out = agent(
"Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
)
print(out)

@ -0,0 +1,319 @@
"""
Zoe - Real Estate Agent
"""
from typing import Optional, Dict, Any, List
from dataclasses import dataclass
from datetime import datetime
import os
import json
import requests
from loguru import logger
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
from enum import Enum
# Configure loguru logger
logger.add(
"logs/real_estate_agent_{time}.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
)
class PropertyType(str, Enum):
"""Enum for property types"""
OFFICE = "office"
RETAIL = "retail"
INDUSTRIAL = "industrial"
MIXED_USE = "mixed-use"
LAND = "land"
@dataclass
class PropertyListing:
"""Data class for commercial property listings"""
property_id: str
address: str
city: str
state: str
zip_code: str
price: float
square_footage: float
property_type: PropertyType
zoning: str
listing_date: datetime
lat: float
lng: float
description: Optional[str] = None
features: Optional[List[str]] = None
images: Optional[List[str]] = None
class PropertyRadarAPI:
"""Client for PropertyRadar API integration"""
def __init__(self, api_key: str):
"""Initialize PropertyRadar API client
Args:
api_key (str): PropertyRadar API key
"""
self.api_key = api_key
self.base_url = "https://api.propertyradar.com/v1"
self.session = requests.Session()
self.session.headers.update(
{
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
)
def search_properties(
self,
max_price: float = 10_000_000,
property_types: List[PropertyType] = None,
location: Dict[str, Any] = None,
min_sqft: Optional[float] = None,
max_sqft: Optional[float] = None,
page: int = 1,
limit: int = 20,
) -> List[PropertyListing]:
"""
Search for commercial properties using PropertyRadar API
Args:
max_price (float): Maximum property price
property_types (List[PropertyType]): Types of properties to search for
location (Dict[str, Any]): Location criteria (city, county, or coordinates)
min_sqft (Optional[float]): Minimum square footage
max_sqft (Optional[float]): Maximum square footage
page (int): Page number for pagination
limit (int): Number of results per page
Returns:
List[PropertyListing]: List of matching properties
"""
try:
# Build the query parameters
params = {
"price_max": max_price,
"property_types": (
[pt.value for pt in property_types]
if property_types
else None
),
"page": page,
"limit": limit,
"for_sale": True,
"state": "FL", # Florida only
"commercial_property": True,
}
# Add location parameters
if location:
params.update(location)
# Add square footage filters
if min_sqft:
params["square_feet_min"] = min_sqft
if max_sqft:
params["square_feet_max"] = max_sqft
# Make the API request
response = self.session.get(
f"{self.base_url}/properties",
params={
k: v for k, v in params.items() if v is not None
},
)
response.raise_for_status()
# Parse the response
properties_data = response.json()
# Convert to PropertyListing objects
return [
PropertyListing(
property_id=prop["id"],
address=prop["address"],
city=prop["city"],
state=prop["state"],
zip_code=prop["zip_code"],
price=float(prop["price"]),
square_footage=float(prop["square_feet"]),
property_type=PropertyType(prop["property_type"]),
zoning=prop["zoning"],
listing_date=datetime.fromisoformat(
prop["list_date"]
),
lat=float(prop["latitude"]),
lng=float(prop["longitude"]),
description=prop.get("description"),
features=prop.get("features", []),
images=prop.get("images", []),
)
for prop in properties_data["results"]
]
except requests.RequestException as e:
logger.error(f"Error fetching properties: {str(e)}")
raise
class CommercialRealEstateAgent:
"""Agent for searching and analyzing commercial real estate properties"""
def __init__(
self,
openai_api_key: str,
propertyradar_api_key: str,
model_name: str = "gpt-4",
temperature: float = 0.1,
saved_state_path: Optional[str] = None,
):
"""Initialize the real estate agent
Args:
openai_api_key (str): OpenAI API key
propertyradar_api_key (str): PropertyRadar API key
model_name (str): Name of the LLM model to use
temperature (float): Temperature setting for the LLM
saved_state_path (Optional[str]): Path to save agent state
"""
self.property_api = PropertyRadarAPI(propertyradar_api_key)
# Initialize OpenAI model
self.model = OpenAIChat(
openai_api_key=openai_api_key,
model_name=model_name,
temperature=temperature,
)
# Initialize the agent
self.agent = Agent(
agent_name="Commercial-Real-Estate-Agent",
system_prompt=self._get_system_prompt(),
llm=self.model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
saved_state_path=saved_state_path,
context_length=200000,
streaming_on=False,
)
logger.info(
"Commercial Real Estate Agent initialized successfully"
)
def _get_system_prompt(self) -> str:
"""Get the system prompt for the agent"""
return """You are a specialized commercial real estate agent assistant focused on Central Florida properties.
Your primary responsibilities are:
1. Search for commercial properties under $10 million
2. Focus on properties zoned for commercial use
3. Provide detailed analysis of property features, location benefits, and potential ROI
4. Consider local market conditions and growth potential
5. Verify zoning compliance and restrictions
When analyzing properties, consider:
- Current market valuations
- Local business development plans
- Traffic patterns and accessibility
- Nearby amenities and businesses
- Future development potential"""
def search_properties(
self,
max_price: float = 10_000_000,
property_types: List[PropertyType] = None,
location: Dict[str, Any] = None,
min_sqft: Optional[float] = None,
max_sqft: Optional[float] = None,
) -> List[Dict[str, Any]]:
"""
Search for properties and provide analysis
Args:
max_price (float): Maximum property price
property_types (List[PropertyType]): Types of properties to search
location (Dict[str, Any]): Location criteria
min_sqft (Optional[float]): Minimum square footage
max_sqft (Optional[float]): Maximum square footage
Returns:
List[Dict[str, Any]]: List of properties with analysis
"""
try:
# Search for properties
properties = self.property_api.search_properties(
max_price=max_price,
property_types=property_types,
location=location,
min_sqft=min_sqft,
max_sqft=max_sqft,
)
# Analyze each property
analyzed_properties = []
for prop in properties:
analysis = self.agent.run(
f"Analyze this commercial property:\n"
f"Address: {prop.address}, {prop.city}, FL {prop.zip_code}\n"
f"Price: ${prop.price:,.2f}\n"
f"Square Footage: {prop.square_footage:,.0f}\n"
f"Property Type: {prop.property_type.value}\n"
f"Zoning: {prop.zoning}\n"
f"Description: {prop.description or 'Not provided'}"
)
analyzed_properties.append(
{"property": prop.__dict__, "analysis": analysis}
)
logger.info(
f"Successfully analyzed {len(analyzed_properties)} properties"
)
return analyzed_properties
except Exception as e:
logger.error(
f"Error in property search and analysis: {str(e)}"
)
raise
def main():
"""Main function to demonstrate usage"""
load_dotenv()
# Initialize the agent
agent = CommercialRealEstateAgent(
openai_api_key=os.getenv("OPENAI_API_KEY"),
propertyradar_api_key=os.getenv("PROPERTYRADAR_API_KEY"),
saved_state_path="real_estate_agent_state.json",
)
# Example search
results = agent.search_properties(
max_price=5_000_000,
property_types=[PropertyType.RETAIL, PropertyType.OFFICE],
location={"city": "Orlando", "radius_miles": 25},
min_sqft=2000,
)
# Save results
with open("search_results.json", "w") as f:
json.dump(results, f, default=str, indent=2)
if __name__ == "__main__":
main()

@ -0,0 +1,119 @@
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the boss agent (Director)
boss_agent = Agent(
agent_name="BossAgent",
system_prompt="""
You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses.
Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently.
After receiving a report on the company's expenses, you will break down the work into smaller tasks,
assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures,
and identifying unnecessary transactions. Ensure the results are communicated back in a structured way
so the finance team can take actionable steps to cut off unproductive spending. You also monitor and
dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings
into a coherent report.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="boss_agent.json",
)
# Initialize worker 1: Expense Analyzer
worker1 = Agent(
agent_name="ExpenseAnalyzer",
system_prompt="""
Your task is to carefully analyze the company's expense data provided to you.
You will focus on identifying high-cost recurring transactions, categorizing expenditures
(e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending.
You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting.
Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker1.json",
)
# Initialize worker 2: Summary Generator
worker2 = Agent(
agent_name="SummaryGenerator",
system_prompt="""
After receiving the detailed breakdown from the ExpenseAnalyzer,
your task is to create a concise summary of the findings. You will focus on the most actionable insights,
such as highlighting the specific transactions that can be immediately cut off and summarizing the areas
where the company is overspending. Your summary will be used by the BossAgent to generate the final report.
Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker2.json",
)
# Swarm-Level Prompt (Collaboration Prompt)
swarm_prompt = """
As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off.
You will work collaboratively to break down the entire process of expense analysis into manageable steps.
The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first
focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them,
and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then
consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses.
Together, your collaboration is essential to streamlining and improving the company's financial health.
"""
# Create a list of agents
agents = [boss_agent, worker1, worker2]
# Define the flow pattern for the swarm
flow = "BossAgent -> ExpenseAnalyzer -> SummaryGenerator"
# Using AgentRearrange class to manage the swarm
agent_system = AgentRearrange(
agents=agents,
flow=flow,
return_json=False,
output_type="final",
max_loops=1,
docs=["SECURITY.md"],
)
# Input task for the swarm
task = f"""
{swarm_prompt}
The company has been facing a rising number of unnecessary expenses, and the finance team needs a detailed
analysis of recent transactions to identify which expenses can be cut off to improve profitability.
Analyze the provided transaction data and create a detailed report on cost-cutting opportunities,
focusing on recurring transactions and non-essential expenditures.
"""
# Run the swarm system with the task
output = agent_system.run(task)
print(output)

@ -0,0 +1,52 @@
#!/bin/bash
# Set up logging
LOG_FILE="docs_compilation.log"
OUTPUT_FILE="combined_docs.txt"
# Initialize log file
echo "$(date): Starting documentation compilation" > "$LOG_FILE"
# Create/clear output file
> "$OUTPUT_FILE"
# Function to determine file type and handle accordingly
process_file() {
local file="$1"
# Get file extension
extension="${file##*.}"
echo "$(date): Processing $file" >> "$LOG_FILE"
case "$extension" in
md|markdown)
echo "# $(basename "$file")" >> "$OUTPUT_FILE"
cat "$file" >> "$OUTPUT_FILE"
echo -e "\n\n" >> "$OUTPUT_FILE"
;;
txt)
echo "# $(basename "$file")" >> "$OUTPUT_FILE"
cat "$file" >> "$OUTPUT_FILE"
echo -e "\n\n" >> "$OUTPUT_FILE"
;;
*)
echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
return
;;
esac
echo "$(date): Successfully processed $file" >> "$LOG_FILE"
}
# Find and process all documentation files
find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
process_file "$file"
done
# Log completion
echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"
# Print summary
echo "Documentation compilation complete. Check $LOG_FILE for details."

@ -0,0 +1,117 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat
load_dotenv()
# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the SequentialWorkflow
router = SequentialWorkflow(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
financial_analyst_agent,
market_analyst_agent,
operational_analyst_agent,
],
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references"
)
print(result)

@ -1,120 +0,0 @@
from swarms.utils.loguru_logger import logger
import yaml
from pydantic import BaseModel
from typing import List, Optional
import json
from swarms.structs.agent_registry import AgentRegistry
from swarms.structs.agent import Agent
from swarm_models.popular_llms import OpenAIChat
class AgentInput(BaseModel):
agent_name: str = "Swarm Agent"
system_prompt: Optional[str] = None
agent_description: Optional[str] = None
model_name: str = "OpenAIChat"
max_loops: int = 1
autosave: bool = False
dynamic_temperature_enabled: bool = False
dashboard: bool = False
verbose: bool = False
streaming_on: bool = True
saved_state_path: Optional[str] = None
sop: Optional[str] = None
sop_list: Optional[List[str]] = None
user_name: str = "User"
retry_attempts: int = 3
context_length: int = 8192
task: Optional[str] = None
interactive: bool = False
def parse_yaml_to_json(yaml_str: str) -> str:
"""
Parses the given YAML string into an AgentInput model and converts it to a JSON string.
Args:
yaml_str (str): The YAML string to be parsed.
Returns:
str: The JSON string representation of the parsed YAML.
Raises:
ValueError: If the YAML string cannot be parsed into the AgentInput model.
"""
try:
data = yaml.safe_load(yaml_str)
agent_input = AgentInput(**data)
return agent_input.json()
except yaml.YAMLError as e:
print(f"YAML Error: {e}")
raise ValueError("Invalid YAML input.") from e
except ValueError as e:
print(f"Validation Error: {e}")
raise ValueError("Invalid data for AgentInput model.") from e
# # Example usage
# yaml_input = """
# agent_name: "Custom Agent"
# system_prompt: "System prompt example"
# agent_description: "This is a test agent"
# model_name: "CustomModel"
# max_loops: 5
# autosave: true
# dynamic_temperature_enabled: true
# dashboard: true
# verbose: true
# streaming_on: false
# saved_state_path: "/path/to/state"
# sop: "Standard operating procedure"
# sop_list: ["step1", "step2"]
# user_name: "Tester"
# retry_attempts: 5
# context_length: 4096
# task: "Perform testing"
# """
# json_output = parse_yaml_to_json(yaml_input)
# print(json_output)
registry = AgentRegistry()
def create_agent_from_yaml(yaml_path: str) -> None:
with open(yaml_path, "r") as file:
yaml_str = file.read()
agent_json = parse_yaml_to_json(yaml_str)
agent_config = json.loads(agent_json)
agent = Agent(
agent_name=agent_config.get("agent_name", "Swarm Agent"),
system_prompt=agent_config.get("system_prompt"),
agent_description=agent_config.get("agent_description"),
llm=OpenAIChat(),
max_loops=agent_config.get("max_loops", 1),
autosave=agent_config.get("autosave", False),
dynamic_temperature_enabled=agent_config.get(
"dynamic_temperature_enabled", False
),
dashboard=agent_config.get("dashboard", False),
verbose=agent_config.get("verbose", False),
streaming_on=agent_config.get("streaming_on", True),
saved_state_path=agent_config.get("saved_state_path"),
retry_attempts=agent_config.get("retry_attempts", 3),
context_length=agent_config.get("context_length", 8192),
)
registry.add(agent.agent_name, agent)
logger.info(f"Agent {agent.agent_name} created from {yaml_path}.")
def run_agent(agent_name: str, task: str) -> None:
agent = registry.find_agent_by_name(agent_name)
agent.run(task)
def list_agents() -> None:
agents = registry.list_agents()
for agent_id in agents:
print(agent_id)

@ -1,10 +0,0 @@
from typing import List
from pydantic import BaseModel
from swarms.schemas.agent_step_schemas import Step
class Plan(BaseModel):
steps: List[Step]
class Config:
orm_mode = True

@ -19,7 +19,6 @@ from swarms.structs.majority_voting import (
parse_code_completion,
)
from swarms.structs.message import Message
from swarms.structs.message_pool import MessagePool
from swarms.structs.mixture_of_agents import MixtureOfAgents
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
@ -93,7 +92,6 @@ __all__ = [
"most_frequent",
"parse_code_completion",
"Message",
"MessagePool",
"MultiAgentCollaboration",
"SwarmNetwork",
"AgentRearrange",

@ -1,3 +1,4 @@
from datetime import datetime
import asyncio
import json
import logging
@ -177,6 +178,7 @@ class Agent:
artifacts_on (bool): Enable artifacts
artifacts_output_path (str): The artifacts output path
artifacts_file_extension (str): The artifacts file extension (.pdf, .md, .txt, etc.)
scheduled_run_date (datetime): The date and time to schedule the task
Methods:
run: Run the agent
@ -333,6 +335,7 @@ class Agent:
device: str = "cpu",
all_cores: bool = True,
device_id: int = 0,
scheduled_run_date: Optional[datetime] = None,
*args,
**kwargs,
):
@ -445,6 +448,7 @@ class Agent:
self.device = device
self.all_cores = all_cores
self.device_id = device_id
self.scheduled_run_date = scheduled_run_date
# Initialize the short term memory
self.short_memory = Conversation(
@ -733,7 +737,9 @@ class Agent:
# Check parameters
def check_parameters(self):
if self.llm is None:
raise ValueError("Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method.")
raise ValueError(
"Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method."
)
if self.max_loops is None or self.max_loops == 0:
raise ValueError("Max loops is not provided")
@ -743,8 +749,6 @@ class Agent:
if self.context_length == 0 or self.context_length is None:
raise ValueError("Context length is not provided")
# Main function
def _run(
@ -2245,14 +2249,17 @@ class Agent:
device: str = "cpu", # gpu
device_id: int = 0,
all_cores: bool = True,
scheduled_run_date: Optional[datetime] = None,
*args,
**kwargs,
) -> Any:
"""
Executes the agent's run method on a specified device.
Executes the agent's run method on a specified device, with optional scheduling.
This method attempts to execute the agent's run method on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.
If a `scheduled_run_date` is provided, the method will wait until that date and time before executing the task.
Args:
task (Optional[str], optional): The task to be executed. Defaults to None.
img (Optional[str], optional): The image to be processed. Defaults to None.
@ -2260,6 +2267,7 @@ class Agent:
device (str, optional): The device to use for execution. Defaults to "cpu".
device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
scheduled_run_date (Optional[datetime], optional): The date and time to schedule the task. Defaults to None.
*args: Additional positional arguments to be passed to the execution method.
**kwargs: Additional keyword arguments to be passed to the execution method.
@ -2273,6 +2281,12 @@ class Agent:
device = device or self.device
device_id = device_id or self.device_id
if scheduled_run_date:
while datetime.now() < scheduled_run_date:
time.sleep(1)  # Sleep for a short period to avoid busy waiting
try:
logger.info(f"Attempting to run on device: {device}")
if device == "cpu":

@ -0,0 +1,93 @@
from typing import List, Any
from loguru import logger
from swarms.structs.agent import Agent
def get_agent_name(agent: Any) -> str:
"""Helper function to safely get agent name
Args:
agent (Any): The agent object to get name from
Returns:
str: The agent's name if found, 'Unknown' otherwise
"""
if isinstance(agent, Agent) and hasattr(agent, "agent_name"):
return agent.agent_name
return "Unknown"
def get_agent_description(agent: Any) -> str:
"""Helper function to get agent description or system prompt preview
Args:
agent (Any): The agent object
Returns:
str: Description or the first 150 characters of the system prompt
"""
if not isinstance(agent, Agent):
return "N/A"
if hasattr(agent, "description") and agent.description:
return agent.description
if hasattr(agent, "system_prompt") and agent.system_prompt:
return f"{agent.system_prompt[:150]}..."
return "N/A"
def showcase_available_agents(
name: str = None,
description: str = None,
agents: List[Agent] = [],
update_agents_on: bool = False,
) -> str:
"""
Generate a formatted string showcasing all available agents and their descriptions.
Args:
name (str, optional): Name of the swarm to display in the header. Defaults to None.
description (str, optional): Description of the swarm to display under the header. Defaults to None.
agents (List[Agent]): List of Agent objects to showcase.
update_agents_on (bool, optional): If True, updates each agent's system prompt with
the showcase information. Defaults to False.
Returns:
str: Formatted string containing agent information, including names, descriptions
and IDs for all available agents.
"""
logger.info(f"Showcasing {len(agents)} available agents")
formatted_agents = []
header = f"\n####### Agents available in the swarm: {name} ############\n"
header += f"{description}\n"
row_format = "{:<5} | {:<20} | {:<50}"
header_row = row_format.format("ID", "Agent Name", "Description")
separator = "-" * 80
formatted_agents.append(header)
formatted_agents.append(separator)
formatted_agents.append(header_row)
formatted_agents.append(separator)
for idx, agent in enumerate(agents):
if not isinstance(agent, Agent):
logger.warning(
f"Skipping non-Agent object: {type(agent)}"
)
continue
agent_name = get_agent_name(agent)
description = (
get_agent_description(agent)[:100] + "..."
if len(get_agent_description(agent)) > 100
else get_agent_description(agent)
)
formatted_agents.append(
row_format.format(idx + 1, agent_name, description)
)
showcase = "\n".join(formatted_agents)
return showcase
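# Example usage (a sketch; assumes Agent instances such as those created in the
# claims-processing example elsewhere in this PR, and an illustrative swarm name
# and description):
#
#   agents = [director_agent, processor_agent, auditor_agent]
#   print(
#       showcase_available_agents(
#           name="Claims-Processing-Swarm",
#           description="Agents that process and audit medical insurance claims",
#           agents=agents,
#       )
#   )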

@ -1,3 +0,0 @@
"""
This class will input a swarm type -> then auto generate a list of `Agent` structures with their name, descriptions, system prompts, and more.
"""

@ -0,0 +1,299 @@
from loguru import logger
import os
from typing import List
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller, OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
class AgentConfig(BaseModel):
"""Configuration for an individual agent in a swarm"""
name: str = Field(
description="The name of the agent", example="Research-Agent"
)
description: str = Field(
description="A description of the agent's purpose and capabilities",
example="Agent responsible for researching and gathering information",
)
system_prompt: str = Field(
description="The system prompt that defines the agent's behavior",
example="You are a research agent. Your role is to gather and analyze information...",
)
max_loops: int = Field(
description="Maximum number of reasoning loops the agent can perform",
example=3,
)
class SwarmConfig(BaseModel):
"""Configuration for a swarm of cooperative agents"""
name: str = Field(
description="The name of the swarm",
example="Research-Writing-Swarm",
)
description: str = Field(
description="The description of the swarm's purpose and capabilities",
example="A swarm of agents that work together to research topics and write articles",
)
agents: List[AgentConfig] = Field(
description="The list of agents that make up the swarm",
example=[
AgentConfig(
name="Research-Agent",
description="Gathers information",
system_prompt="You are a research agent...",
max_loops=2,
),
AgentConfig(
name="Writing-Agent",
description="Writes content",
system_prompt="You are a writing agent...",
max_loops=1,
),
],
)
max_loops: int = Field(
description="The maximum number of loops to run the swarm",
example=1,
)
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
BOSS_SYSTEM_PROMPT = """
Manage a swarm of worker agents to efficiently serve the user by deciding whether to create new agents or delegate tasks. Ensure operations are efficient and effective.
### Instructions:
1. **Task Assignment**:
- Analyze available worker agents when a task is presented.
- Delegate tasks to existing agents with clear, direct, and actionable instructions if an appropriate agent is available.
- If no suitable agent exists, create a new agent with a fitting system prompt to handle the task.
2. **Agent Creation**:
- Name agents according to the task they are intended to perform (e.g., "Twitter Marketing Agent").
- Provide each new agent with a concise and clear system prompt that includes its role, objectives, and any tools it can utilize.
3. **Efficiency**:
- Minimize redundancy and maximize task completion speed.
- Avoid unnecessary agent creation if an existing agent can fulfill the task.
4. **Communication**:
- Be explicit in task delegation instructions to avoid ambiguity and ensure effective task execution.
- Require agents to report back on task completion or encountered issues.
5. **Reasoning and Decisions**:
- Offer brief reasoning when selecting or creating agents to maintain transparency.
- Avoid using an agent if unnecessary, with a clear explanation if no agents are suitable for a task.
# Output Format
Present your plan in clear, bullet-point format or short concise paragraphs, outlining task assignment, agent creation, efficiency strategies, and communication protocols.
# Notes
- Preserve transparency by always providing reasoning for task-agent assignments and creation.
- Ensure instructions to agents are unambiguous to minimize error.
"""
class AutoSwarmBuilder:
"""A class that automatically builds and manages swarms of AI agents.
This class handles the creation, coordination and execution of multiple AI agents working
together as a swarm to accomplish complex tasks. It uses a boss agent to delegate work
and create new specialized agents as needed.
Args:
name (str): The name of the swarm
description (str): A description of the swarm's purpose
verbose (bool, optional): Whether to output detailed logs. Defaults to True.
max_loops (int, optional): Maximum number of execution loops. Defaults to 1.
"""
def __init__(
self,
name: str = None,
description: str = None,
verbose: bool = True,
max_loops: int = 1,
):
self.name = name
self.description = description
self.verbose = verbose
self.max_loops = max_loops
self.agents_pool = []
logger.info(
f"Initialized AutoSwarmBuilder: {name} {description}"
)
# @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
def run(self, task: str, image_url: str = None, *args, **kwargs):
"""Run the swarm on a given task.
Args:
task (str): The task to be accomplished
image_url (str, optional): URL of an image input if needed. Defaults to None.
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments
Returns:
The output from the swarm's execution
"""
logger.info(f"Running swarm on task: {task}")
agents = self._create_agents(task, image_url, *args, **kwargs)
logger.info(f"Agents created {len(agents)}")
logger.info("Routing task through swarm")
output = self.swarm_router(agents, task, image_url)
logger.info(f"Swarm execution complete with output: {output}")
return output
# @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
def _create_agents(self, task: str, *args, **kwargs):
"""Create the necessary agents for a task.
Args:
task (str): The task to create agents for
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments
Returns:
list: List of created agents
"""
logger.info("Creating agents for task")
model = OpenAIFunctionCaller(
system_prompt=BOSS_SYSTEM_PROMPT,
api_key=os.getenv("OPENAI_API_KEY"),
temperature=0.1,
base_model=SwarmConfig,
)
agents_dictionary = model.run(task)
logger.info(f"Agents dictionary: {agents_dictionary}")
# Convert dictionary to SwarmConfig if needed
if isinstance(agents_dictionary, dict):
agents_dictionary = SwarmConfig(**agents_dictionary)
# Set swarm config
self.name = agents_dictionary.name
self.description = agents_dictionary.description
self.max_loops = getattr(
agents_dictionary, "max_loops", 1
) # Default to 1 if not set
logger.info(
f"Swarm config: {self.name}, {self.description}, {self.max_loops}"
)
# Create agents from config
agents = []
for agent_config in agents_dictionary.agents:
# Convert dict to AgentConfig if needed
if isinstance(agent_config, dict):
agent_config = AgentConfig(**agent_config)
agent = self.build_agent(
agent_name=agent_config.name,
agent_description=agent_config.description,
agent_system_prompt=agent_config.system_prompt,
max_loops=agent_config.max_loops,
)
agents.append(agent)
return agents
def build_agent(
self,
agent_name: str,
agent_description: str,
agent_system_prompt: str,
max_loops: int = 1,
):
"""Build a single agent with the given specifications.
Args:
agent_name (str): Name of the agent
agent_description (str): Description of the agent's purpose
agent_system_prompt (str): The system prompt for the agent
Returns:
Agent: The constructed agent instance
"""
logger.info(f"Building agent: {agent_name}")
agent = Agent(
agent_name=agent_name,
description=agent_description,
system_prompt=agent_system_prompt,
llm=model,
max_loops=max_loops,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path=f"{agent_name}.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" soon "yaml" and
streaming_on=False,
auto_generate_prompt=True,
)
return agent
# @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
def swarm_router(
self,
agents: List[Agent],
task: str,
image_url: str = None,
*args,
**kwargs,
):
"""Route tasks between agents in the swarm.
Args:
agents (List[Agent]): List of available agents
task (str): The task to route
image_url (str, optional): URL of an image input if needed. Defaults to None.
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments
Returns:
The output from the routed task execution
"""
logger.info("Routing task through swarm")
swarm_router_instance = SwarmRouter(
agents=agents,
swarm_type="auto",
max_loops=1,
)
return swarm_router_instance.run(
self.name + " " + self.description + " " + task,
)
example = AutoSwarmBuilder()
print(
example.run(
"Write multiple blog posts about the latest advancements in swarm intelligence all at once"
)
)

@ -6,6 +6,12 @@ import yaml
from termcolor import colored
from swarms.structs.base_structure import BaseStructure
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from swarms.structs.agent import (
Agent,
) # Only imported during type checking
class Conversation(BaseStructure):
@ -392,6 +398,33 @@ class Conversation(BaseStructure):
def to_yaml(self):
return yaml.dump(self.conversation_history)
def get_visible_messages(self, agent: "Agent", turn: int):
"""
Get the visible messages for a given agent and turn.
Args:
agent (Agent): The agent.
turn (int): The turn number.
Returns:
List[Dict]: The list of visible messages.
"""
# Get the messages before the current turn
prev_messages = [
message
for message in self.conversation_history
if message["turn"] < turn
]
visible_messages = []
for message in prev_messages:
if (
message["visible_to"] == "all"
or agent.agent_name in message["visible_to"]
):
visible_messages.append(message)
return visible_messages
# # Example usage
# conversation = Conversation()
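# # Illustrative sketch (assumes messages in conversation_history carry
# # "turn" and "visible_to" keys, as get_visible_messages expects; agent names are placeholders):
# conversation.conversation_history.append(
#     {"role": "agent1", "content": "hello", "turn": 0, "visible_to": "all"}
# )
# visible = conversation.get_visible_messages(agent=some_agent, turn=1)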

@ -1,72 +1,159 @@
from typing import List, Dict
from typing import List, Dict, Optional, Union, Callable, Any
from pydantic import BaseModel, Field
from swarms.structs.conversation import Conversation
from swarms.utils.loguru_logger import logger
from swarms.structs.agent import Agent
from datetime import datetime
import json
from uuid import uuid4
from swarms.schemas.agent_step_schemas import ManySteps
import logging
from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Message(BaseModel):
"""Single message in the conversation"""
role: str
content: str
timestamp: datetime = Field(default_factory=datetime.utcnow)
class AgentMetadata(BaseModel):
"""Metadata for tracking agent state and configuration"""
agent_name: str
agent_type: str
system_prompt: Optional[str] = None
description: Optional[str] = None
config: Dict[str, Any] = Field(default_factory=dict)
class InteractionLog(BaseModel):
"""Log entry for a single interaction"""
id: str = Field(default_factory=lambda: uuid4().hex)
agent_name: str
position: int
input_text: str
output_text: str
timestamp: datetime = Field(default_factory=datetime.utcnow)
metadata: Dict[str, Any] = Field(default_factory=dict)
class GroupChatInput(BaseModel):
class GroupChatState(BaseModel):
"""Complete state of the group chat"""
id: str = Field(default_factory=lambda: uuid4().hex)
name: Optional[str] = None
description: Optional[str] = None
admin_name: str
group_objective: str
agents: List[Dict[str, str]]
max_rounds: int
selector_agent: Dict[str, str]
rules: str
rules: Optional[str] = None
agent_metadata: List[AgentMetadata]
messages: List[Message]
interactions: List[InteractionLog]
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
class GroupChatOutput(BaseModel):
id: str = Field(uuid4().hex)
task: str = Field(..., description=None)
input_config: GroupChatInput
agent_outputs: List[ManySteps] = Field(..., description=None)
class AgentWrapper:
"""Wrapper class to standardize agent interfaces"""
def __init__(
self,
agent: Union["Agent", Callable],
agent_name: str,
system_prompt: Optional[str] = None,
):
self.agent = agent
self.agent_name = agent_name
self.system_prompt = system_prompt
self._validate_agent()
def _validate_agent(self):
"""Validate that the agent has the required interface"""
if hasattr(self.agent, "run"):
self.run = self.agent.run
elif callable(self.agent):
self.run = self.agent
else:
raise ValueError(
"Agent must either have a 'run' method or be callable"
)
def get_metadata(self) -> AgentMetadata:
"""Extract metadata from the agent"""
return AgentMetadata(
agent_name=self.agent_name,
agent_type=type(self.agent).__name__,
system_prompt=self.system_prompt,
config={
k: v
for k, v in self.agent.__dict__.items()
if isinstance(v, (str, int, float, bool, dict, list))
},
)
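# Example usage (illustrative sketch; a plain callable works in place of an Agent):
# def echo_agent(prompt: str) -> str:
#     return f"echo: {prompt}"
#
# wrapped = AgentWrapper(agent=echo_agent, agent_name="Echo-Agent")
# print(wrapped.run("hello"))
# print(wrapped.get_metadata())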
class GroupChat:
"""Manager class for a group chat.
"""Enhanced GroupChat manager with state persistence and comprehensive logging.
This class handles the management of a group chat, including initializing the conversation,
selecting the next speaker, resetting the chat, and executing the chat rounds.
This class implements a multi-agent chat system with the following key features:
- State persistence to disk
- Comprehensive interaction logging
- Configurable agent selection
- Early stopping conditions
- Conversation export capabilities
Args:
agents (List[Agent], optional): List of agents participating in the group chat. Defaults to None.
max_rounds (int, optional): Maximum number of chat rounds. Defaults to 10.
admin_name (str, optional): Name of the admin user. Defaults to "Admin".
group_objective (str, optional): Objective of the group chat. Defaults to None.
selector_agent (Agent, optional): Agent responsible for selecting the next speaker. Defaults to None.
rules (str, optional): Rules for the group chat. Defaults to None.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
The GroupChat coordinates multiple agents to have a goal-directed conversation,
with one agent speaking at a time based on a selector agent's decisions.
Attributes:
agents (List[Agent]): List of agents participating in the group chat.
max_rounds (int): Maximum number of chat rounds.
admin_name (str): Name of the admin user.
group_objective (str): Objective of the group chat.
selector_agent (Agent): Agent responsible for selecting the next speaker.
messages (Conversation): Conversation object for storing the chat messages.
name (Optional[str]): Name of the group chat
description (Optional[str]): Description of the group chat's purpose
agents (List[Union["Agent", Callable]]): List of participating agents
max_rounds (int): Maximum number of conversation rounds
admin_name (str): Name of the administrator
group_objective (str): The goal/objective of the conversation
selector_agent (Union["Agent", Callable]): Agent that selects next speaker
rules (Optional[str]): Rules governing the conversation
state_path (Optional[str]): Path to save conversation state
showcase_agents_on (bool): Whether to showcase agent capabilities
"""
def __init__(
self,
name: str = None,
description: str = None,
agents: List[Agent] = None,
name: Optional[str] = None,
description: Optional[str] = None,
agents: List[Union["Agent", Callable]] = None,
max_rounds: int = 10,
admin_name: str = "Admin",
group_objective: str = None,
selector_agent: Agent = None,
rules: str = None,
*args,
**kwargs,
selector_agent: Union["Agent", Callable] = None,
rules: Optional[str] = None,
state_path: Optional[str] = None,
showcase_agents_on: bool = False,
):
# super().__init__(agents = agents, *args, **kwargs)
if not agents:
raise ValueError(
"Agents cannot be empty. Add more agents."
)
"""Initialize a new GroupChat instance.
Args:
name: Name of the group chat
description: Description of the group chat's purpose
agents: List of participating agents
max_rounds: Maximum number of conversation rounds
admin_name: Name of the administrator
group_objective: The goal/objective of the conversation
selector_agent: Agent that selects next speaker
rules: Rules governing the conversation
state_path: Path to save conversation state
showcase_agents_on: Whether to showcase agent capabilities
Raises:
ValueError: If no agents are provided
"""
self.name = name
self.description = description
self.agents = agents
@ -74,184 +161,327 @@ class GroupChat:
self.admin_name = admin_name
self.group_objective = group_objective
self.selector_agent = selector_agent
self.rules = rules
self.state_path = state_path
self.showcase_agents_on = showcase_agents_on
# Initialize the conversation
self.message_history = Conversation(
system_prompt=self.group_objective,
time_enabled=True,
user=self.admin_name,
rules=rules,
*args,
**kwargs,
if not agents:
raise ValueError("At least two agents are required")
# Generate unique state path if not provided
self.state_path = (
state_path or f"group_chat_{uuid4().hex}.json"
)
# Initialize log for interactions
self.group_log = GroupChatLog(
admin_name=self.admin_name,
group_objective=self.group_objective,
# Wrap all agents to standardize interface
self.wrapped_agents = [
AgentWrapper(
agent,
(
f"Agent_{i}"
if not hasattr(agent, "agent_name")
else agent.agent_name
),
)
for i, agent in enumerate(agents)
]
# Configure selector agent
self.selector_agent = AgentWrapper(
selector_agent or self.wrapped_agents[0].agent,
"Selector",
"Select the next speaker based on the conversation context",
)
@property
def agent_names(self) -> List[str]:
"""Return the names of the agents in the group chat."""
return [agent.agent_name for agent in self.agents]
# Initialize conversation state
self.state = GroupChatState(
name=name,
description=description,
admin_name=admin_name,
group_objective=group_objective,
max_rounds=max_rounds,
rules=rules,
agent_metadata=[
agent.get_metadata() for agent in self.wrapped_agents
],
messages=[],
interactions=[],
)
def reset(self):
"""Reset the group chat."""
logger.info("Resetting GroupChat")
self.message_history.clear()
# Showcase agents if enabled
if self.showcase_agents_on is True:
self.showcase_agents()
def agent_by_name(self, name: str) -> Agent:
"""Find an agent whose name is contained within the given 'name' string.
def showcase_agents(self):
"""Showcase available agents and update their system prompts.
Args:
name (str): Name string to search for.
This method displays agent capabilities and updates each agent's
system prompt with information about other agents in the group.
"""
out = showcase_available_agents(
name=self.name,
description=self.description,
agents=self.wrapped_agents,
)
Returns:
Agent: Agent object with a name contained in the given 'name' string.
for agent in self.wrapped_agents:
# Initialize system_prompt if None
if agent.system_prompt is None:
agent.system_prompt = ""
agent.system_prompt += out
Raises:
ValueError: If no agent is found with a name contained in the given 'name' string.
def save_state(self) -> None:
"""Save current conversation state to disk.
The state is saved as a JSON file at the configured state_path.
"""
for agent in self.agents:
if agent.agent_name in name:
return agent
raise ValueError(
f"No agent found with a name contained in '{name}'."
)
with open(self.state_path, "w") as f:
json.dump(self.state.dict(), f, default=str, indent=2)
logger.info(f"State saved to {self.state_path}")
def next_agent(self, agent: Agent) -> Agent:
"""Return the next agent in the list.
@classmethod
def load_state(cls, state_path: str) -> "GroupChat":
"""Load GroupChat from saved state.
Args:
agent (Agent): Current agent.
state_path: Path to the saved state JSON file
Returns:
Agent: Next agent in the list.
GroupChat: A new GroupChat instance with restored state
Raises:
FileNotFoundError: If state file doesn't exist
json.JSONDecodeError: If state file is invalid JSON
"""
return self.agents[
(self.agent_names.index(agent.agent_name) + 1)
% len(self.agents)
]
with open(state_path, "r") as f:
state_dict = json.load(f)
# Convert loaded data back to state model
state = GroupChatState(**state_dict)
# Initialize with minimal config, then restore state
instance = cls(
name=state.name,
admin_name=state.admin_name,
agents=[], # Temporary empty list
group_objective=state.group_objective,
)
instance.state = state
return instance
def select_speaker_msg(self):
"""Return the message for selecting the next speaker."""
prompt = f"""
You are in a role play game. The following roles are available:
{self._participant_roles()}.
def _log_interaction(
self,
agent_name: str,
position: int,
input_text: str,
output_text: str,
) -> None:
"""Log a single interaction in the conversation.
Read the following conversation.
Then select the next role from {self.agent_names} to play. Only return the role.
Args:
agent_name: Name of the speaking agent
position: Position in conversation sequence
input_text: Input context provided to agent
output_text: Agent's response
"""
return prompt
log_entry = InteractionLog(
agent_name=agent_name,
position=position,
input_text=input_text,
output_text=output_text,
metadata={
"current_agents": [
a.agent_name for a in self.wrapped_agents
],
"round": position // len(self.wrapped_agents),
},
)
self.state.interactions.append(log_entry)
self.save_state()
def select_speaker(
self, last_speaker_agent: Agent, selector_agent: Agent
) -> Agent:
"""Select the next speaker.
def _add_message(self, role: str, content: str) -> None:
"""Add a message to the conversation history.
Args:
last_speaker_agent (Agent): Last speaker in the conversation.
selector_agent (Agent): Agent responsible for selecting the next speaker.
Returns:
Agent: Next speaker.
role: Speaker's role/name
content: Message content
"""
logger.info("Selecting a new speaker")
selector_agent.system_prompt = self.select_speaker_msg()
n_agents = len(self.agent_names)
if n_agents < 3:
logger.warning(
f"GroupChat is underpopulated with {n_agents} agents. Direct communication might be more efficient."
)
self.message_history.add(
role=self.admin_name,
content=f"Read the above conversation. Then select the next most suitable role from {self.agent_names} to play. Only return the role.",
)
message = Message(role=role, content=content)
self.state.messages.append(message)
self.save_state()
name = selector_agent.run(
self.message_history.return_history_as_string()
)
try:
selected_agent = self.agent_by_name(name)
return selected_agent
except ValueError:
return self.next_agent(last_speaker_agent)
def select_next_speaker(
self, last_speaker: AgentWrapper
) -> AgentWrapper:
"""Select the next speaker using the selector agent.
def _participant_roles(self):
"""Print the roles of the participants.
Args:
last_speaker: The agent who spoke last
Returns:
str: Participant roles.
AgentWrapper: The next agent to speak
Note:
Falls back to round-robin selection if selector agent fails
"""
return "\n".join(
conversation_history = "\n".join(
[
f"{agent.agent_name}: {agent.system_prompt}"
for agent in self.agents
f"{msg.role}: {msg.content}"
for msg in self.state.messages
]
)
def run(self, task: str, *args, **kwargs):
"""Call 'GroupChatManager' instance as a function.
selection_prompt = f"""
Current speakers: {[agent.agent_name for agent in self.wrapped_agents]}
Last speaker: {last_speaker.agent_name}
Group objective: {self.state.group_objective}
Based on the conversation history and group objective, select the next most appropriate speaker.
Only return the speaker's name.
Conversation history:
{conversation_history}
"""
try:
next_speaker_name = self.selector_agent.run(
selection_prompt
).strip()
return next(
agent
for agent in self.wrapped_agents
if agent.agent_name in next_speaker_name
)
except (StopIteration, Exception) as e:
logger.warning(
f"Selector agent failed: {str(e)}. Falling back to round-robin."
)
# Fallback to round-robin if selection fails
current_idx = self.wrapped_agents.index(last_speaker)
return self.wrapped_agents[
(current_idx + 1) % len(self.wrapped_agents)
]
def run(self, task: str) -> str:
"""Execute the group chat conversation.
Args:
task (str): Task to be performed.
task: The initial task/question to discuss
Returns:
str: Reply from the last speaker.
str: The final response from the conversation
Raises:
Exception: If any error occurs during execution
"""
try:
logger.info(
f"Activating GroupChat with {len(self.agents)} Agents"
)
self.message_history.add(
self.selector_agent.agent_name, task
)
logger.info(f"Starting GroupChat with task: {task}")
self._add_message(self.state.admin_name, task)
for i in range(self.max_rounds):
speaker_agent = self.select_speaker(
last_speaker_agent=self.selector_agent,
selector_agent=self.selector_agent,
current_speaker = self.wrapped_agents[0]
final_response = None
for round_num in range(self.state.max_rounds):
# Select next speaker
current_speaker = self.select_next_speaker(
current_speaker
)
logger.info(
f"Next speaker selected: {speaker_agent.agent_name}"
f"Selected speaker: {current_speaker.agent_name}"
)
reply = speaker_agent.run(
self.message_history.return_history_as_string(),
*args,
**kwargs,
)
self.message_history.add(
speaker_agent.agent_name, reply
# Prepare context and get response
conversation_history = "\n".join(
[
f"{msg.role}: {msg.content}"
for msg in self.state.messages[
-10:
] # Last 10 messages for context
]
)
# Log the interaction
self.group_log.log_interaction(
agent_name=speaker_agent.agent_name,
position=i,
input_text=self.message_history.return_history_as_string(),
output_text=reply,
try:
response = current_speaker.run(
conversation_history
)
final_response = response
except Exception as e:
logger.error(
f"Agent {current_speaker.agent_name} failed: {str(e)}"
)
continue
# Log interaction and add to message history
self._log_interaction(
current_speaker.agent_name,
round_num,
conversation_history,
response,
)
self._add_message(
current_speaker.agent_name, response
)
if i == self.max_rounds - 1:
# Optional: Add early stopping condition based on response content
if (
"TASK_COMPLETE" in response
or "CONCLUSION" in response
):
logger.info(
"Task completion detected, ending conversation"
)
break
return reply
return final_response or "No valid response generated"
except Exception as error:
logger.error(
f"Error detected: {error}. Please optimize the inputs and submit an issue on the swarms GitHub."
)
raise error
except Exception as e:
logger.error(f"Error in GroupChat execution: {str(e)}")
raise
def get_conversation_summary(self) -> Dict[str, Any]:
"""Return a summary of the conversation.
def get_group_log_as_json(self) -> str:
"""Return the interaction log as a JSON string."""
return self.group_log.return_json()
Returns:
Dict containing conversation metrics and status
"""
return {
"id": self.state.id,
"total_interactions": len(self.state.interactions),
"participating_agents": [
agent.agent_name for agent in self.wrapped_agents
],
"conversation_length": len(self.state.messages),
"duration": (
datetime.utcnow() - self.state.created_at
).total_seconds(),
"objective_completed": any(
"TASK_COMPLETE" in msg.content
for msg in self.state.messages
),
}
def export_conversation(
self, format: str = "json"
) -> Union[str, Dict]:
"""Export the conversation in the specified format.
Args:
format: Output format ("json" or "text")
Returns:
Union[str, Dict]: Conversation in requested format
Raises:
ValueError: If format is not supported
"""
if format == "json":
return self.state.dict()
elif format == "text":
return "\n".join(
[
f"{msg.role} ({msg.timestamp}): {msg.content}"
for msg in self.state.messages
]
)
else:
raise ValueError(f"Unsupported export format: {format}")

@ -1,214 +0,0 @@
import hashlib
from time import time_ns
from typing import Callable, List, Optional, Sequence, Union
from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger
from swarms.structs.base_swarm import BaseSwarm
def _hash(input: str):
"""
Hashes the input string using SHA256 algorithm.
Args:
input (str): The string to be hashed.
Returns:
str: The hexadecimal representation of the hash value.
"""
hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
return hex_dig
def msg_hash(
agent: Agent, content: str, turn: int, msg_type: str = "text"
):
"""
Generate a hash value for a message.
Args:
agent (Agent): The agent sending the message.
content (str): The content of the message.
turn (int): The turn number of the message.
msg_type (str, optional): The type of the message. Defaults to "text".
Returns:
int: The hash value of the message.
"""
time = time_ns()
return _hash(
f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
)
class MessagePool(BaseSwarm):
"""
A class representing a message pool for agents in a swarm.
Attributes:
agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
moderator (Optional[Agent]): The moderator agent.
turns (Optional[int]): The number of turns.
routing_function (Optional[Callable]): The routing function for message distribution.
show_names (Optional[bool]): Flag indicating whether to show agent names.
messages (List[Dict]): The list of messages in the pool.
Examples:
>>> from swarms.structs.agent import Agent
>>> from swarms.structs.message_pool import MessagePool
>>> agent1 = Agent(agent_name="agent1")
>>> agent2 = Agent(agent_name="agent2")
>>> agent3 = Agent(agent_name="agent3")
>>> moderator = Agent(agent_name="moderator")
>>> agents = [agent1, agent2, agent3]
>>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
>>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
>>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
>>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
>>> message_pool.get_all_messages()
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent1, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent2, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
"""
def __init__(
self,
agents: Optional[Sequence[Agent]] = None,
moderator: Optional[Agent] = None,
turns: Optional[int] = 5,
routing_function: Optional[Callable] = None,
show_names: Optional[bool] = False,
autosave: Optional[bool] = False,
*args,
**kwargs,
):
super().__init__()
self.agent = agents
self.moderator = moderator
self.turns = turns
self.routing_function = routing_function
self.show_names = show_names
self.autosave = autosave
self.messages = []
logger.info("MessagePool initialized")
logger.info(f"Number of agents: {len(agents)}")
logger.info(
f"Agents: {[agent.agent_name for agent in agents]}"
)
logger.info(f"moderator: {moderator.agent_name} is available")
logger.info(f"Number of turns: {turns}")
def add(
self,
agent: Agent,
content: str,
turn: int,
visible_to: Union[str, List[str]] = "all",
logged: bool = True,
):
"""
Add a message to the pool.
Args:
agent (Agent): The agent sending the message.
content (str): The content of the message.
turn (int): The turn number.
visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True.
"""
self.messages.append(
{
"agent": agent,
"content": content,
"turn": turn,
"visible_to": visible_to,
"logged": logged,
}
)
logger.info(f"Message added: {content}")
def reset(self):
"""
Reset the message pool.
"""
self.messages = []
logger.info("MessagePool reset")
def last_turn(self):
"""
Get the last turn number.
Returns:
int: The last turn number.
"""
if len(self.messages) == 0:
return 0
else:
return self.messages[-1]["turn"]
@property
def last_message(self):
"""
Get the last message in the pool.
Returns:
dict: The last message.
"""
if len(self.messages) == 0:
return None
else:
return self.messages[-1]
def get_all_messages(self):
"""
Get all messages in the pool.
Returns:
List[Dict]: The list of all messages.
"""
return self.messages
def get_visible_messages(self, agent: Agent, turn: int):
"""
Get the visible messages for a given agent and turn.
Args:
agent (Agent): The agent.
turn (int): The turn number.
Returns:
List[Dict]: The list of visible messages.
"""
# Get the messages before the current turn
prev_messages = [
message
for message in self.messages
if message["turn"] < turn
]
visible_messages = []
for message in prev_messages:
if (
message["visible_to"] == "all"
or agent.agent_name in message["visible_to"]
):
visible_messages.append(message)
return visible_messages
# def query(self, query: str):
# """
# Query a message from the messages list and then pass it to the moderator
# """
# return [
# (mod, content)
# for mod, content, _ in self.messages # Add an underscore to ignore the rest of the elements
# if query in content
# ]

@ -6,7 +6,7 @@ from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.telemetry.log_swarm_data import log_agent_data
from swarms.telemetry.capture_sys_data import log_agent_data
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.prompts.ag_prompt import aggregator_system_prompt

@ -17,11 +17,18 @@ from swarms.utils.loguru_logger import logger
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
from swarms.utils.swarm_reliability_checks import reliability_check
# Literal of output types
OutputType = Literal[
"all", "final", "list", "dict", ".json", ".md", ".txt", ".yaml", ".toml"
"all",
"final",
"list",
"dict",
".json",
".md",
".txt",
".yaml",
".toml",
]
@ -451,14 +458,16 @@ class AgentRearrange(BaseSwarm):
return output
except Exception as e:
logger.error(f"An error occurred: {e} \n {traceback.format_exc()}")
logger.error(
f"An error occurred: {e} \n {traceback.format_exc()}"
)
return e
def run(
self,
task: str = None,
img: str = None,
device: str = "cpu",
device: str = "cpu",
device_id: int = 1,
all_cores: bool = True,
all_gpus: bool = False,
@ -492,7 +501,7 @@ class AgentRearrange(BaseSwarm):
*args,
**kwargs,
)
def __call__(self, task: str, *args, **kwargs):
"""
Make the class callable by executing the run() method.

@ -44,8 +44,6 @@ class SequentialWorkflow:
self.reliability_check()
self.agent_rearrange = AgentRearrange(
name=name,
description=description,
@ -58,10 +56,10 @@ class SequentialWorkflow:
*args,
**kwargs,
)
# Handle agent showcase
self.handle_agent_showcase()
def sequential_flow(self):
# Only create flow if agents exist
if self.agents:
@ -70,21 +68,28 @@ class SequentialWorkflow:
for agent in self.agents:
try:
# Try to get agent_name, fallback to name if not available
agent_name = getattr(agent, 'agent_name', None) or agent.name
agent_name = (
getattr(agent, "agent_name", None)
or agent.name
)
agent_names.append(agent_name)
except AttributeError:
logger.warning(f"Could not get name for agent {agent}")
logger.warning(
f"Could not get name for agent {agent}"
)
continue
if agent_names:
flow = " -> ".join(agent_names)
else:
flow = ""
logger.warning("No valid agent names found to create flow")
logger.warning(
"No valid agent names found to create flow"
)
else:
flow = ""
logger.warning("No agents provided to create flow")
return flow
def reliability_check(self):
@ -93,9 +98,11 @@ class SequentialWorkflow:
if self.max_loops == 0:
raise ValueError("max_loops cannot be 0")
if self.output_type not in OutputType:
raise ValueError("output_type must be 'all', 'final', 'list', 'dict', '.json', '.md', '.txt', '.yaml', or '.toml'")
raise ValueError(
"output_type must be 'all', 'final', 'list', 'dict', '.json', '.md', '.txt', '.yaml', or '.toml'"
)
logger.info("Checks completed your swarm is ready.")

@ -12,7 +12,7 @@ from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.file_processing import create_file_in_folder
from swarms.telemetry.log_swarm_data import log_agent_data
from swarms.telemetry.capture_sys_data import log_agent_data
time = datetime.datetime.now().isoformat()
uuid_hex = uuid.uuid4().hex

@ -187,7 +187,6 @@ class SwarmRouter:
# Add documents to the logs
# self.logs.append(Document(file_path=self.documents, data=data))
def activate_shared_memory(self):
logger.info("Activating shared memory with all agents ")
@ -451,7 +450,7 @@ class SwarmRouter:
def __call__(self, task: str, *args, **kwargs) -> Any:
"""
Make the SwarmRouter instance callable.
Args:
task (str): The task to be executed by the swarm.
*args: Variable length argument list.
@ -611,7 +610,10 @@ class SwarmRouter:
Raises:
Exception: If an error occurs during task execution.
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
from concurrent.futures import (
ThreadPoolExecutor,
as_completed,
)
results = []
with ThreadPoolExecutor() as executor:
@ -620,7 +622,7 @@ class SwarmRouter:
executor.submit(self.run, task, *args, **kwargs)
for task in tasks
]
# Process results as they complete rather than waiting for all
for future in as_completed(futures):
try:
@ -629,7 +631,7 @@ class SwarmRouter:
except Exception as e:
logger.error(f"Task execution failed: {str(e)}")
results.append(None)
return results

@ -1,16 +0,0 @@
def log_agent_data(data: dict):
import requests
data_dict = {
"data": data,
}
url = "https://swarms.world/api/get-agents/log-agents"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
}
response = requests.post(url, json=data_dict, headers=headers)
return response.json()

@ -10,7 +10,12 @@ def get_python_version():
return platform.python_version()
def get_pip_version():
def get_pip_version() -> str:
"""Get pip version
Returns:
str: The version of pip installed
"""
try:
pip_version = (
subprocess.check_output(["pip", "--version"])
@ -22,7 +27,12 @@ def get_pip_version():
return pip_version
def get_swarms_verison():
def get_swarms_verison() -> tuple[str, str]:
"""Get swarms version from both command line and package
Returns:
tuple[str, str]: A tuple containing (command line version, package version)
"""
try:
swarms_verison_cmd = (
subprocess.check_output(["swarms", "--version"])
@ -38,15 +48,30 @@ def get_swarms_verison():
return swarms_verison
def get_os_version():
def get_os_version() -> str:
"""Get operating system version
Returns:
str: The operating system version and platform details
"""
return platform.platform()
def get_cpu_info():
def get_cpu_info() -> str:
"""Get CPU information
Returns:
str: The processor information
"""
return platform.processor()
def get_ram_info():
def get_ram_info() -> str:
"""Get RAM information
Returns:
str: A formatted string containing total, used and free RAM in GB
"""
vm = psutil.virtual_memory()
used_ram_gb = vm.used / (1024**3)
free_ram_gb = vm.free / (1024**3)
@ -57,7 +82,15 @@ def get_ram_info():
)
def get_package_mismatches(file_path="pyproject.toml"):
def get_package_mismatches(file_path: str = "pyproject.toml") -> str:
"""Get package version mismatches between pyproject.toml and installed packages
Args:
file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml".
Returns:
str: A formatted string containing package version mismatches
"""
with open(file_path) as file:
pyproject = toml.load(file)
dependencies = pyproject["tool"]["poetry"]["dependencies"]
@ -89,7 +122,12 @@ def get_package_mismatches(file_path="pyproject.toml"):
return "\n" + "\n".join(mismatches)
def system_info():
def system_info() -> dict[str, str]:
"""Get system information including Python, pip, OS, CPU and RAM details
Returns:
dict[str, str]: A dictionary containing system information
"""
return {
"Python Version": get_python_version(),
"Pip Version": get_pip_version(),

@ -0,0 +1,141 @@
from typing import Any, List, Optional, Union
from pathlib import Path
from loguru import logger
from doc_master import doc_master
from concurrent.futures import ThreadPoolExecutor, as_completed
from tenacity import retry, stop_after_attempt, wait_exponential
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
)
def _process_document(doc_path: Union[str, Path]) -> str:
"""Safely process a single document with retries.
Args:
doc_path: Path to the document to process
Returns:
Processed document text
Raises:
Exception: If document processing fails after retries
"""
try:
return doc_master(
file_path=str(doc_path), output_type="string"
)
except Exception as e:
logger.error(
f"Error processing document {doc_path}: {str(e)}"
)
raise
def handle_input_docs(
agents: Any,
docs: Optional[List[Union[str, Path]]] = None,
doc_folder: Optional[Union[str, Path]] = None,
max_workers: int = 4,
chunk_size: int = 1000000,
) -> Any:
"""
Add document content to agent prompts with improved reliability and performance.
Args:
agents: Dictionary mapping agent names to Agent objects
docs: List of document paths
doc_folder: Path to folder containing documents
max_workers: Maximum number of parallel document processing workers
chunk_size: Maximum characters to process at once to avoid memory issues
    Returns:
        The agents mapping with document content appended to each agent's system prompt,
        or None if no agents or documents were provided.
    Raises:
        RuntimeError: If document or folder processing fails
"""
if not agents:
logger.warning(
"No agents provided, skipping document distribution"
)
return
if not docs and not doc_folder:
logger.warning(
"No documents or folder provided, skipping document distribution"
)
return
logger.info("Starting document distribution to agents")
try:
processed_docs = []
# Process individual documents in parallel
if docs:
with ThreadPoolExecutor(
max_workers=max_workers
) as executor:
future_to_doc = {
executor.submit(_process_document, doc): doc
for doc in docs
}
for future in as_completed(future_to_doc):
doc = future_to_doc[future]
try:
processed_docs.append(future.result())
except Exception as e:
logger.error(
f"Failed to process document {doc}: {str(e)}"
)
raise RuntimeError(
f"Document processing failed: {str(e)}"
)
# Process folder if specified
elif doc_folder:
try:
folder_content = doc_master(
folder_path=str(doc_folder), output_type="string"
)
processed_docs.append(folder_content)
except Exception as e:
logger.error(
f"Failed to process folder {doc_folder}: {str(e)}"
)
raise RuntimeError(
f"Folder processing failed: {str(e)}"
)
# Combine and chunk the processed documents
combined_data = "\n".join(processed_docs)
# Update agent prompts in chunks to avoid memory issues
for agent in agents.values():
try:
for i in range(0, len(combined_data), chunk_size):
chunk = combined_data[i : i + chunk_size]
if i == 0:
agent.system_prompt += (
"\nDocuments:\n" + chunk
)
else:
agent.system_prompt += chunk
except Exception as e:
logger.error(
f"Failed to update agent prompt: {str(e)}"
)
raise RuntimeError(
f"Agent prompt update failed: {str(e)}"
)
logger.info(
f"Successfully added documents to {len(agents)} agents"
)
return agents
except Exception as e:
logger.error(f"Document distribution failed: {str(e)}")
raise RuntimeError(f"Document distribution failed: {str(e)}")

@ -0,0 +1,102 @@
from typing import Union, Dict, List, Tuple, Any
def any_to_str(data: Union[str, Dict, List, Tuple, Any]) -> str:
"""Convert any input data type to a nicely formatted string.
This function handles conversion of various Python data types into a clean string representation.
It recursively processes nested data structures and handles None values gracefully.
Args:
data: Input data of any type to convert to string. Can be:
- Dictionary
- List/Tuple
- String
- None
- Any other type that can be converted via str()
    Returns:
        str: A formatted string representation of the input data.
            - Dictionaries are formatted as "key: value" pairs, one per line
            - Lists/tuples are wrapped in brackets/parentheses with comma-separated items
            - None is rendered as "None"
            - Strings are wrapped in double quotes; other types are converted using str()
    Examples:
        >>> any_to_str({'a': 1, 'b': 2})
        'a: 1\nb: 2'
        >>> any_to_str([1, 2, 3])
        '[1, 2, 3]'
        >>> any_to_str(None)
        'None'
"""
try:
if isinstance(data, dict):
# Format dictionary with newlines and indentation
items = []
for k, v in data.items():
value = any_to_str(v)
items.append(f"{k}: {value}")
return "\n".join(items)
elif isinstance(data, (list, tuple)):
# Format sequences with brackets and proper spacing
items = [any_to_str(x) for x in data]
if len(items) == 0:
return "[]" if isinstance(data, list) else "()"
return (
f"[{', '.join(items)}]"
if isinstance(data, list)
else f"({', '.join(items)})"
)
elif data is None:
return "None"
else:
# Handle strings and other types
if isinstance(data, str):
return f'"{data}"'
return str(data)
except Exception as e:
return f"Error converting data: {str(e)}"
def main():
# Example 1: Dictionary
print("Dictionary:")
print(
any_to_str(
{
"name": "John",
"age": 30,
"hobbies": ["reading", "hiking"],
}
)
)
print("\nNested Dictionary:")
print(
any_to_str(
{
"user": {
"id": 123,
"details": {"city": "New York", "active": True},
},
"data": [1, 2, 3],
}
)
)
print("\nList and Tuple:")
print(any_to_str([1, "text", None, (1, 2)]))
print(any_to_str((True, False, None)))
print("\nEmpty Collections:")
print(any_to_str([]))
print(any_to_str({}))
if __name__ == "__main__":
main()

@ -137,53 +137,3 @@ def data_to_text(file: str) -> str:
return data
except Exception as e:
raise OSError(f"Error reading file: {file}") from e
def data_to_text(file):
"""
Converts the given data file to text format.
Args:
file (str): The path to the data file.
Returns:
str: The text representation of the data file.
Raises:
FileNotFoundError: If the file does not exist.
IOError: If there is an error reading the file.
Examples:
>>> data_to_text("data.csv")
'This is the text representation of the data file.'
"""
if not os.path.exists(file):
raise FileNotFoundError(f"File not found: {file}")
try:
_, ext = os.path.splitext(file)
ext = (
ext.lower()
) # Convert extension to lowercase for case-insensitive comparison
if ext == ".csv":
return csv_to_text(file)
elif ext == ".json":
return json_to_text(file)
elif ext == ".txt":
return txt_to_text(file)
elif ext == ".pdf":
return pdf_to_text(file)
elif ext == ".md":
return md_to_text(file)
else:
# Check if the file is a binary file (like an image)
if ext in [".png", ".jpg", ".jpeg", ".gif", ".bmp"]:
# Skip binary files
return None
else:
with open(file) as file:
data = file.read()
return data
except Exception as e:
raise OSError(f"Error reading file: {file}") from e

@ -1,46 +1,10 @@
import functools
import logging
import threading
import time
import warnings
def log_decorator(func):
def wrapper(*args, **kwargs):
logging.info(f"Entering {func.__name__}")
result = func(*args, **kwargs)
logging.info(f"Exiting {func.__name__}")
return result
return wrapper
def error_decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.error(f"Error in {func.__name__}: {str(e)}")
raise
return wrapper
def timing_decorator(func):
def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
logging.info(
f"{func.__name__} executed in"
f" {end_time - start_time} seconds"
)
return result
return wrapper
def retry_decorator(max_retries=5):
def retry_decorator(max_retries: int = 5):
"""
Decorator that retries a function a specified number of times if an exception occurs.

@ -1,127 +0,0 @@
import time
from os import cpu_count
from typing import Any, Callable, List, Optional
from loguru import logger
from pathos.multiprocessing import ProcessingPool as Pool
from typing import Tuple
def execute_parallel_optimized(
callables_with_args: List[
Tuple[Callable[..., Any], Tuple[Any, ...]]
],
max_workers: Optional[int] = None,
chunk_size: Optional[int] = None,
retries: int = 3,
**kwargs,
) -> List[Any]:
"""
Executes a list of callables in parallel, leveraging all available CPU cores.
This function is optimized for high performance and reliability.
Args:
callables_with_args (List[Tuple[Callable[..., Any], Tuple[Any, ...]]]):
A list of tuples, where each tuple contains a callable and a tuple of its arguments.
max_workers (Optional[int]): The maximum number of workers to use. Defaults to the number of available cores.
chunk_size (Optional[int]): The size of chunks to split the tasks into for balanced execution. Defaults to automatic chunking.
retries (int): Number of retries for a failed task. Default is 3.
Returns:
List[Any]: A list of results from each callable. The order corresponds to the order of the input list.
Raises:
Exception: Any exception raised by the callable will be logged and re-raised after retries are exhausted.
"""
max_workers = cpu_count() if max_workers is None else max_workers
results = []
logger.info(
f"Starting optimized parallel execution of {len(callables_with_args)} tasks."
)
pool = Pool(
nodes=max_workers, **kwargs
) # Initialize the pool once
def _execute_with_retry(callable_, args, retries):
attempt = 0
while attempt < retries:
try:
result = callable_(*args)
logger.info(
f"Task {callable_} with args {args} completed successfully."
)
return result
except Exception as e:
attempt += 1
logger.warning(
f"Task {callable_} with args {args} failed on attempt {attempt}: {e}"
)
time.sleep(1) # Small delay before retrying
if attempt >= retries:
logger.error(
f"Task {callable_} with args {args} failed after {retries} retries."
)
raise
try:
if chunk_size is None:
chunk_size = (
len(callables_with_args)
// (max_workers or pool.ncpus)
or 1
)
# Use chunking and mapping for efficient execution
results = pool.map(
lambda item: _execute_with_retry(
item[0], item[1], retries
),
callables_with_args,
chunksize=chunk_size,
)
except Exception as e:
logger.critical(
f"Parallel execution failed due to an error: {e}"
)
raise
logger.info(
f"Optimized parallel execution completed. {len(results)} tasks executed."
)
pool.close() # Ensure pool is properly closed
pool.join()
# return results
# def add(a, b):
# return a + b
# def multiply(a, b):
# return a * b
# def power(a, b):
# return a**b
# # if __name__ == "__main__":
# # # List of callables with their respective arguments
# # callables_with_args = [
# # (add, (2, 3)),
# # (multiply, (5, 4)),
# # (power, (2, 10)),
# # ]
# # # Execute the callables in parallel
# # results = execute_parallel_optimized(callables_with_args)
# # # Print the results
# # print("Results:", results)

@ -1,75 +0,0 @@
from loguru import logger
import sys
import platform
import os
import datetime
# Configuring loguru to log to both the console and a file
logger.remove() # Remove default logger configuration
logger.add(
sys.stderr,
level="INFO",
format="<green>{time}</green> - <level>{level}</level> - <level>{message}</level>",
)
logger.add(
"info.log", level="INFO", format="{time} - {level} - {message}"
)
def log_success_message() -> None:
"""
Logs a success message with instructions for sharing agents on the Swarms Agent Explorer and joining the community for assistance.
Returns:
None
Raises:
None
"""
# Gather extensive context information
context_info = {
"timestamp": datetime.datetime.now().isoformat(),
"python_version": platform.python_version(),
"platform": platform.platform(),
"machine": platform.machine(),
"processor": platform.processor(),
"user": os.getenv("USER") or os.getenv("USERNAME"),
"current_working_directory": os.getcwd(),
}
success_message = (
f"\n"
f"#########################################\n"
f"# #\n"
f"# SUCCESSFUL RUN DETECTED! #\n"
f"# #\n"
f"#########################################\n"
f"\n"
f"Your task completed successfully!\n"
f"\n"
f"Context Information:\n"
f"-----------------------------------------\n"
f"Timestamp: {context_info['timestamp']}\n"
f"Python Version: {context_info['python_version']}\n"
f"Platform: {context_info['platform']}\n"
f"Machine: {context_info['machine']}\n"
f"Processor: {context_info['processor']}\n"
f"User: {context_info['user']}\n"
f"Current Working Directory: {context_info['current_working_directory']}\n"
f"-----------------------------------------\n"
f"\n"
f"Share your agents on the Swarms Agent Explorer with friends:\n"
f"https://swarms.world/platform/explorer\n"
f"\n"
f"Join the Swarms community if you want assistance or help debugging:\n"
f"https://discord.gg/uzu63HQx\n"
f"\n"
f"#########################################\n"
)
logger.info(success_message)
# Example usage:
# log_success_message()

@ -0,0 +1,34 @@
from typing import Union, Dict, List
from swarms.artifacts.main_artifact import Artifact
def handle_artifact_outputs(
file_path: str,
data: Union[str, Dict, List],
output_type: str = "txt",
folder_path: str = "./artifacts",
) -> str:
"""
Handle different types of data and create files in various formats.
Args:
file_path: Path where the file should be saved
data: Input data that can be string, dict or list
output_type: Type of output file (txt, md, pdf, csv, json)
folder_path: Folder to save artifacts
Returns:
str: Path to the created file
"""
# Create artifact with appropriate file type
artifact = Artifact(
folder_path=folder_path,
file_path=file_path,
file_type=output_type,
contents=data,
edit_count=0,
)
# Save the file
# artifact.save()
artifact.save_as(output_format=output_type)
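# Example usage (illustrative sketch; paths and data are placeholders):
# handle_artifact_outputs(
#     file_path="summary.json",
#     data={"topic": "swarm intelligence", "status": "complete"},
#     output_type="json",
#     folder_path="./artifacts",
# )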

@ -0,0 +1,78 @@
from loguru import logger
from typing import List, Union, Callable, Optional
from swarms.structs.agent import Agent
def reliability_check(
agents: List[Union[Agent, Callable]],
max_loops: int,
name: Optional[str] = None,
description: Optional[str] = None,
flow: Optional[str] = None,
) -> None:
"""
Performs reliability checks on swarm configuration parameters.
Args:
agents: List of Agent objects or callables that will be executed
max_loops: Maximum number of execution loops
name: Name identifier for the swarm
        description: Description of the swarm's purpose
        flow: Flow string describing the order in which agents execute
Raises:
ValueError: If any parameters fail validation checks
TypeError: If parameters are of incorrect type
"""
logger.info("Initializing swarm reliability checks")
# Type checking
if not isinstance(agents, list):
raise TypeError("agents parameter must be a list")
if not isinstance(max_loops, int):
raise TypeError("max_loops must be an integer")
# Validate agents
if not agents:
raise ValueError("Agents list cannot be empty")
for i, agent in enumerate(agents):
if not isinstance(agent, (Agent, Callable)):
raise TypeError(
f"Agent at index {i} must be an Agent instance or Callable"
)
# Validate max_loops
if max_loops <= 0:
raise ValueError("max_loops must be greater than 0")
if max_loops > 1000:
logger.warning(
"Large max_loops value detected. This may impact performance."
)
# Validate name
if name is None:
raise ValueError("name parameter is required")
if not isinstance(name, str):
raise TypeError("name must be a string")
if len(name.strip()) == 0:
raise ValueError("name cannot be empty or just whitespace")
# Validate description
if description is None:
raise ValueError("description parameter is required")
if not isinstance(description, str):
raise TypeError("description must be a string")
if len(description.strip()) == 0:
raise ValueError(
"description cannot be empty or just whitespace"
)
# Validate flow
if flow is None:
raise ValueError("flow parameter is required")
if not isinstance(flow, str):
raise TypeError("flow must be a string")
logger.info("All reliability checks passed successfully")

@ -0,0 +1,77 @@
import os
from typing import Any
from clusterops import (
execute_on_gpu,
execute_on_multiple_gpus,
execute_with_cpu_cores,
list_available_gpus,
)
from loguru import logger
def exec_callable_with_clusterops(
device: str = "cpu",
device_id: int = 0,
all_cores: bool = True,
all_gpus: bool = False,
func: callable = None,
*args,
**kwargs,
) -> Any:
"""
Executes a given function on a specified device, either CPU or GPU.
This method attempts to execute a given function on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.
Args:
device (str, optional): The device to use for execution. Defaults to "cpu".
device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
all_gpus (bool, optional): If True, uses all available GPUs. Defaults to False.
func (callable): The function to execute.
*args: Additional positional arguments to be passed to the execution method.
**kwargs: Additional keyword arguments to be passed to the execution method.
Returns:
Any: The result of the execution.
Raises:
ValueError: If an invalid device is specified.
Exception: If any other error occurs during execution.
"""
try:
logger.info(f"Attempting to run on device: {device}")
if device == "cpu":
logger.info("Device set to CPU")
if all_cores is True:
count = os.cpu_count()
logger.info(f"Using all available CPU cores: {count}")
else:
count = device_id
logger.info(f"Using specific CPU core: {count}")
return execute_with_cpu_cores(
count, func, *args, **kwargs
)
        # If device is gpu, check the all_gpus flag first so the
        # multi-GPU branch is reachable
        elif device == "gpu" and all_gpus is True:
            logger.info("Device set to GPU and running on all GPUs")
            gpus = [int(gpu) for gpu in list_available_gpus()]
            return execute_on_multiple_gpus(
                gpus, func, *args, **kwargs
            )
        elif device == "gpu":
            logger.info("Device set to GPU")
            return execute_on_gpu(device_id, func, *args, **kwargs)
else:
raise ValueError(
f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'."
)
except ValueError as e:
logger.error(f"Invalid device specified: {e}")
raise e
except Exception as e:
logger.error(f"An error occurred during execution: {e}")
raise e
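# Example usage (illustrative sketch; assumes the clusterops package is installed):
# def square(x: int) -> int:
#     return x * x
#
# result = exec_callable_with_clusterops("cpu", 0, True, False, square, 12)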

@ -1,117 +0,0 @@
from swarm_models import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.message_pool import MessagePool
def test_message_pool_initialization():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
agent2 = Agent(llm=OpenAIChat(), agent_name="agent1")
moderator = Agent(llm=OpenAIChat(), agent_name="agent1")
agents = [agent1, agent2]
message_pool = MessagePool(
agents=agents, moderator=moderator, turns=5
)
assert message_pool.agent == agents
assert message_pool.moderator == moderator
assert message_pool.turns == 5
assert message_pool.messages == []
def test_message_pool_add():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.messages == [
{
"agent": agent1,
"content": "Hello, world!",
"turn": 1,
"visible_to": "all",
"logged": True,
}
]
def test_message_pool_reset():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
message_pool.reset()
assert message_pool.messages == []
def test_message_pool_last_turn():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.last_turn() == 1
def test_message_pool_last_message():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.last_message == {
"agent": agent1,
"content": "Hello, world!",
"turn": 1,
"visible_to": "all",
"logged": True,
}
def test_message_pool_get_all_messages():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.get_all_messages() == [
{
"agent": agent1,
"content": "Hello, world!",
"turn": 1,
"visible_to": "all",
"logged": True,
}
]
def test_message_pool_get_visible_messages():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
agent2 = Agent(agent_name="agent2")
message_pool = MessagePool(
agents=[agent1, agent2], moderator=agent1, turns=5
)
message_pool.add(
agent=agent1,
content="Hello, agent2!",
turn=1,
visible_to=[agent2.agent_name],
)
assert message_pool.get_visible_messages(
agent=agent2, turn=2
) == [
{
"agent": agent1,
"content": "Hello, agent2!",
"turn": 1,
"visible_to": [agent2.agent_name],
"logged": True,
}
]