[7.6.3] -- [FEAT][Agent.run() acceleration] [FIX][Groupchat]

pull/805/head
Kye Gomez 2 weeks ago
parent 57eca9b6a2
commit 5af454020a

@ -29,8 +29,6 @@ BRAVESEARCH_API_KEY=""
TAVILY_API_KEY=""
YOU_API_KEY=""
## Analytics & Monitoring
AGENTOPS_API_KEY=""
EXA_API_KEY=""
## Browser Automation

@ -427,8 +427,6 @@ We provide vast array of features to save agent states using json, yaml, toml, u
| `tokens_checks()` | Performs token checks for the agent. |
| `print_dashboard()` | Prints the dashboard of the agent. |
| `get_docs_from_doc_folders()` | Fetches all the documents from the doc folders. |
| `activate_agentops()` | Activates agent operations. |
| `check_end_session_agentops()` | Checks the end of the session for agent operations. |
@ -480,8 +478,6 @@ agent.print_dashboard()
agent.get_docs_from_doc_folders()
# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()
# Dump the model to a JSON file
agent.model_dump_json()

@ -4,7 +4,7 @@ from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.utils.str_to_dict import str_to_dict
load_dotenv()
@ -49,16 +49,11 @@ agent = Agent(
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
tools_list_dictionary=tools,
output_type="final",
)
out = agent.run(
"What is the current stock price for Apple Inc. (AAPL)? Include historical price data.",
)
print(out)
print(type(out))
print(str_to_dict(out))
print(type(str_to_dict(out)))

@ -1,105 +0,0 @@
## Usage Documentation: Discord Bot with Advanced Features
---
### Overview:
This code provides a structure for a Discord bot with advanced features such as voice channel interactions, image generation, and text-based interactions using OpenAI models.
---
### Setup:
1. Ensure that the necessary libraries are installed:
```bash
pip install discord.py python-dotenv dalle3 invoke openai
```
2. Create a `.env` file in the same directory as your bot script and add the following:
```
DISCORD_TOKEN=your_discord_bot_token
STORAGE_SERVICE=your_storage_service_endpoint
SAVE_DIRECTORY=path_to_save_generated_images
```
---
### Bot Class and its Methods:
#### `__init__(self, agent, llm, command_prefix="!")`:
Initializes the bot with the given agent, language model (`llm`), and a command prefix (default is `!`).
#### `add_command(self, name, func)`:
Allows you to dynamically add new commands to the bot. The `name` is the command's name and `func` is the function to execute when the command is called.
#### `run(self)`:
Starts the bot using the `DISCORD_TOKEN` from the `.env` file.
---
### Commands:
1. **!greet**: Greets the user.
2. **!help_me**: Provides a list of commands and their descriptions.
3. **!join**: Joins the voice channel the user is in.
4. **!leave**: Leaves the voice channel the bot is currently in.
5. **!listen**: Starts listening to voice in the current voice channel and records the audio.
6. **!generate_image [prompt]**: Generates images based on the provided prompt using the DALL-E3 model.
7. **!send_text [text] [use_agent=True]**: Sends the provided text to the worker (either the agent or the LLM) and returns the response.
---
### Usage:
Initialize the `llm` (language model) with your OpenAI API key:
```python
from swarm_models import OpenAIChat
llm = OpenAIChat(
openai_api_key="Your_OpenAI_API_Key",
temperature=0.5,
)
```
Initialize the bot with the `llm`:
```python
from apps.discord import Bot
bot = Bot(llm=llm)
```
Send a task to the bot:
```python
task = "What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
bot.send_text(task)
```
Start the bot:
```python
bot.run()
```
---
### Additional Notes:
- The bot makes use of the `dalle3` library for image generation. Ensure you have the model and necessary setup for it.
- For the storage service, you might want to integrate with a cloud service like Google Cloud Storage or AWS S3 to store and retrieve generated images. The given code assumes a method `.upload()` for the storage service to upload files.
- Ensure that you've granted the bot necessary permissions on Discord, especially if you want to use voice channel features.
- Handle API keys and tokens securely. Avoid hardcoding them directly into your code. Use environment variables or secure secret management tools.

@ -0,0 +1,765 @@
# Swarms API: Orchestrating the Future of AI Agent Collaboration
In today's rapidly evolving AI landscape, we're witnessing a fundamental shift from single-agent AI systems to complex, collaborative multi-agent architectures. While individual AI models like GPT-4 and Claude have demonstrated remarkable capabilities, they often struggle with complex tasks requiring diverse expertise, nuanced decision-making, and specialized domain knowledge. Enter the Swarms API, an enterprise-grade solution designed to orchestrate collaborative intelligence through coordinated AI agent swarms.
## The Problem: The Limitations of Single-Agent AI
Despite significant advances in large language models and AI systems, single-agent architectures face inherent limitations when tackling complex real-world problems:
### Expertise Boundaries
Even the most advanced AI models have knowledge boundaries. No single model can possess expert-level knowledge across all domains simultaneously. When a task requires deep expertise in multiple areas (finance, law, medicine, and technical analysis, for example), a single agent quickly reaches its limits.
### Complex Reasoning Chains
Many real-world problems demand multistep reasoning with multiple feedback loops and verification processes. Single agents often struggle to maintain reasoning coherence through extended problem-solving journeys, leading to errors that compound over time.
### Workflow Orchestration
Enterprise applications frequently require sophisticated workflows with multiple handoffs, approvals, and specialized processing steps. Managing this orchestration with individual AI instances is inefficient and error-prone.
### Resource Optimization
Deploying high-powered AI models for every task is expensive and inefficient. Organizations need right-sized solutions that match computing resources to task requirements.
### Collaboration Mechanisms
The most sophisticated human problem-solving happens in teams, where specialists collaborate, debate, and refine solutions together. This collaborative intelligence is difficult to replicate with isolated AI agents.
## The Solution: Swarms API
The Swarms API addresses these challenges through a revolutionary approach to AI orchestration. By enabling multiple specialized agents to collaborate in coordinated swarms, it unlocks new capabilities previously unattainable with single-agent architectures.
### What is the Swarms API?
The Swarms API is an enterprise-grade platform that enables organizations to deploy and manage intelligent agent swarms in the cloud. Rather than relying on a single AI agent to handle complex tasks, the Swarms API orchestrates teams of specialized AI agents that work together, each handling specific aspects of a larger problem.
The platform provides a robust infrastructure for creating, executing, and managing sophisticated AI agent workflows without the burden of maintaining the underlying infrastructure. With its cloud-native architecture, the Swarms API offers scalability, reliability, and security essential for enterprise deployments.
## Core Capabilities
The Swarms API delivers a comprehensive suite of capabilities designed for production-grade AI orchestration:
### Intelligent Swarm Management
At its core, the Swarms API enables the creation and execution of collaborative agent swarms. These swarms consist of specialized AI agents designed to work together on complex tasks. Unlike traditional AI approaches where a single model handles the entire workload, swarms distribute tasks among specialized agents, each contributing its expertise to the collective solution.
For example, a financial analysis swarm might include:
- A data preprocessing agent that cleans and normalizes financial data
- A market analyst agent that identifies trends and patterns
- An economic forecasting agent that predicts future market conditions
- A report generation agent that compiles insights into a comprehensive analysis
By coordinating these specialized agents, the swarm can deliver more accurate, nuanced, and valuable results than any single agent could produce alone.
### Automatic Agent Generation
One of the most powerful features of the Swarms API is its ability to dynamically create optimized agents based on task requirements. Rather than manually configuring each agent in a swarm, users can specify the overall task and let the platform automatically generate appropriate agents with optimized prompts and configurations.
This automatic agent generation significantly reduces the expertise and effort required to deploy effective AI solutions. The system analyzes the task requirements and creates a set of agents specifically designed to address different aspects of the problem. This approach not only saves time but also improves the quality of results by ensuring each agent is properly configured for its specific role.
### Multiple Swarm Architectures
Different problems require different collaboration patterns. The Swarms API supports various swarm architectures to match specific workflow needs:
- **SequentialWorkflow**: Agents work in a predefined sequence, with each agent handling specific subtasks in order
- **ConcurrentWorkflow**: Multiple agents work simultaneously on different aspects of a task
- **GroupChat**: Agents collaborate in a discussion format to solve problems collectively
- **HierarchicalSwarm**: Organizes agents in a structured hierarchy with managers and workers
- **MajorityVoting**: Uses a consensus mechanism where multiple agents vote on the best solution
- **AutoSwarmBuilder**: Automatically designs and builds an optimal swarm architecture based on the task
- **MixtureOfAgents**: Combines multiple agent types to tackle diverse aspects of a problem
- **MultiAgentRouter**: Routes subtasks to specialized agents based on their capabilities
- **AgentRearrange**: Dynamically reorganizes the workflow between agents based on evolving task requirements
This flexibility allows organizations to select the most appropriate collaboration pattern for each specific use case, optimizing the balance between efficiency, thoroughness, and creativity.
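To make the selection concrete, here is a minimal configuration sketch showing where the architecture choice lives. The agent definitions and task are illustrative placeholders; only the `swarm_type` field changes between runs.
```python
# Minimal sketch: the same two-agent swarm under different architectures.
# Agent names, prompts, and the task are illustrative placeholders.
def build_config(swarm_type: str) -> dict:
    return {
        "name": f"Demo ({swarm_type})",
        "description": "Two-agent demo used to compare swarm architectures",
        "agents": [
            {
                "agent_name": "Researcher",
                "description": "Gathers relevant facts",
                "system_prompt": "Collect the key facts needed for the task.",
                "model_name": "gpt-4o-mini",
                "role": "worker",
                "max_loops": 1,
            },
            {
                "agent_name": "Writer",
                "description": "Drafts the final brief",
                "system_prompt": "Turn the gathered facts into a short, clear brief.",
                "model_name": "gpt-4o-mini",
                "role": "worker",
                "max_loops": 1,
            },
        ],
        "max_loops": 1,
        "swarm_type": swarm_type,  # e.g. "SequentialWorkflow", "ConcurrentWorkflow", "GroupChat"
        "task": "Summarize the near-term outlook for the EV battery market.",
    }


sequential_config = build_config("SequentialWorkflow")
groupchat_config = build_config("GroupChat")
```
Either configuration can then be submitted through the completions endpoint described in the Getting Started section below.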
### Scheduled Execution
The Swarms API enables automated, scheduled swarm executions, allowing organizations to set up recurring tasks that run automatically at specified times. This feature is particularly valuable for regular reporting, monitoring, and analysis tasks that need to be performed on a consistent schedule.
For example, a financial services company could schedule a daily market analysis swarm to run before trading hours, providing updated insights based on overnight market movements. Similarly, a cybersecurity team might schedule hourly security assessment swarms to continuously monitor potential threats.
### Comprehensive Logging
Transparency and auditability are essential for enterprise AI applications. The Swarms API provides comprehensive logging capabilities that track all API interactions, agent communications, and decision processes. This detailed logging enables:
- Debugging and troubleshooting swarm behaviors
- Auditing decision trails for compliance and quality assurance
- Analyzing performance patterns to identify optimization opportunities
- Documenting the rationale behind AI-generated recommendations
These logs provide valuable insights into how swarms operate and make decisions, increasing trust and enabling continuous improvement of AI workflows.
### Cost Management
AI deployment costs can quickly escalate without proper oversight. The Swarms API addresses this challenge through:
- **Predictable, transparent pricing**: Clear cost structures that make budgeting straightforward
- **Optimized resource utilization**: Intelligent allocation of computing resources based on task requirements
- **Detailed cost breakdowns**: Comprehensive reporting on token usage, agent costs, and total expenditures
- **Model flexibility**: Freedom to choose the most cost-effective models for each agent based on task complexity
This approach ensures organizations get maximum value from their AI investments without unexpected cost overruns.
### Enterprise Security
Security is paramount for enterprise AI deployments. The Swarms API implements robust security measures including:
- **Full API key authentication**: Secure access control for all API interactions
- **Comprehensive key management**: Tools for creating, rotating, and revoking API keys
- **Usage monitoring**: Tracking and alerting for suspicious activity patterns
- **Secure data handling**: Appropriate data protection throughout the swarm execution lifecycle
These security features ensure that sensitive data and AI workflows remain protected in accordance with enterprise security requirements.
## How It Works: Behind the Scenes
The Swarms API operates on a sophisticated architecture designed for reliability, scalability, and performance. Here's a look at what happens when you submit a task to the Swarms API:
1. **Task Submission**: You send a request to the API with your task description and desired swarm configuration.
2. **Swarm Configuration**: The system either uses your specified agent configuration or automatically generates an optimal swarm structure based on the task requirements.
3. **Agent Initialization**: Each agent in the swarm is initialized with its specific instructions, model parameters, and role definitions.
4. **Orchestration Setup**: The system establishes the communication and workflow patterns between agents based on the selected swarm architecture.
5. **Execution**: The swarm begins working on the task, with agents collaborating according to their defined roles and relationships.
6. **Monitoring and Adjustment**: Throughout execution, the system monitors agent performance and makes adjustments as needed.
7. **Result Compilation**: Once the task is complete, the system compiles the results into the requested format.
8. **Response Delivery**: The final output is returned to you, along with metadata about the execution process.
This entire process happens seamlessly in the cloud, with the Swarms API handling all the complexities of agent coordination, resource allocation, and workflow management.
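From the client's perspective, steps 1 and 8 collapse into a single HTTP round trip; everything in between happens server-side. The sketch below mirrors the configuration fields used in the examples later in this article, though the exact metadata returned alongside the output may vary by API version.
```python
import os
import requests

# Step 1: submit the task and swarm configuration in one request.
response = requests.post(
    "https://api.swarms.world/v1/swarm/completions",
    headers={
        "x-api-key": os.getenv("SWARMS_API_KEY"),
        "Content-Type": "application/json",
    },
    json={
        "name": "Market Brief",
        "description": "Single-agent demo swarm",
        "agents": [
            {
                "agent_name": "Analyst",
                "description": "Writes a concise market brief",
                "system_prompt": "Produce a concise, well-structured market brief for the given task.",
                "model_name": "gpt-4o-mini",
                "role": "worker",
                "max_loops": 1,
            }
        ],
        "max_loops": 1,
        "swarm_type": "SequentialWorkflow",
        "task": "Summarize this week's key movements in semiconductor stocks.",
    },
)

# Step 8: the compiled output comes back with metadata about the execution.
result = response.json()
print(list(result.keys()))
```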
## Real-World Applications
The Swarms API enables a wide range of applications across industries. Here are some compelling use cases that demonstrate its versatility:
### Financial Services
#### Investment Research
Financial institutions can deploy research swarms that combine market analysis, economic forecasting, company evaluation, and risk assessment. These swarms can evaluate investment opportunities much more comprehensively than single-agent systems, considering multiple factors simultaneously:
- Macroeconomic indicators
- Company fundamentals
- Market sentiment
- Technical analysis patterns
- Regulatory considerations
For example, an investment research swarm analyzing a potential stock purchase might include specialists in the company's industry, financial statement analysis, market trend identification, and risk assessment. This collaborative approach delivers more nuanced insights than any single analyst or model could produce independently.
#### Regulatory Compliance
Financial regulations are complex and constantly evolving. Compliance swarms can monitor regulatory changes, assess their impact on existing policies, and recommend appropriate adjustments. These swarms might include:
- Regulatory monitoring agents that track new rules and guidelines
- Policy analysis agents that evaluate existing compliance frameworks
- Gap assessment agents that identify discrepancies
- Documentation agents that update compliance materials
This approach ensures comprehensive coverage of regulatory requirements while minimizing compliance risks.
### Healthcare
#### Medical Research Analysis
The medical literature grows at an overwhelming pace, making it difficult for researchers and clinicians to stay current. Research analysis swarms can continuously scan new publications, identify relevant findings, and synthesize insights for specific research questions or clinical scenarios.
A medical research swarm might include:
- Literature scanning agents that identify relevant publications
- Methodology assessment agents that evaluate research quality
- Clinical relevance agents that determine practical applications
- Summary agents that compile key findings into accessible reports
This collaborative approach enables more thorough literature reviews and helps bridge the gap between research and clinical practice.
#### Treatment Planning
Complex medical cases often benefit from multidisciplinary input. Treatment planning swarms can integrate perspectives from different medical specialties, consider patient-specific factors, and recommend comprehensive care approaches.
For example, an oncology treatment planning swarm might include specialists in:
- Diagnostic interpretation
- Treatment protocol evaluation
- Drug interaction assessment
- Patient history analysis
- Evidence-based outcome prediction
By combining these specialized perspectives, the swarm can develop more personalized and effective treatment recommendations.
### Legal Services
#### Contract Analysis
Legal contracts contain numerous interconnected provisions that must be evaluated holistically. Contract analysis swarms can review complex agreements more thoroughly by assigning different sections to specialized agents:
- Definition analysis agents that ensure consistent terminology
- Risk assessment agents that identify potential liabilities
- Compliance agents that check regulatory requirements
- Precedent comparison agents that evaluate terms against standards
- Conflict detection agents that identify internal inconsistencies
This distributed approach enables more comprehensive contract reviews while reducing the risk of overlooking critical details.
#### Legal Research
Legal research requires examining statutes, case law, regulations, and scholarly commentary. Research swarms can conduct multi-faceted legal research by coordinating specialized agents focusing on different aspects of the legal landscape.
A legal research swarm might include:
- Statutory analysis agents that examine relevant laws
- Case law agents that review judicial precedents
- Regulatory agents that assess administrative rules
- Scholarly analysis agents that evaluate academic perspectives
- Synthesis agents that integrate findings into cohesive arguments
This collaborative approach produces more comprehensive legal analyses that consider multiple sources of authority.
### Research and Development
#### Scientific Literature Review
Scientific research increasingly spans multiple disciplines, making comprehensive literature reviews challenging. Literature review swarms can analyze publications across relevant fields, identify methodological approaches, and synthesize findings from diverse sources.
For example, a biomedical engineering literature review swarm might include specialists in:
- Materials science
- Cellular biology
- Clinical applications
- Regulatory requirements
- Statistical methods
By integrating insights from these different perspectives, the swarm can produce more comprehensive and valuable literature reviews.
#### Experimental Design
Designing robust experiments requires considering multiple factors simultaneously. Experimental design swarms can develop sophisticated research protocols by integrating methodological expertise, statistical considerations, practical constraints, and ethical requirements.
An experimental design swarm might coordinate:
- Methodology agents that design experimental procedures
- Statistical agents that determine appropriate sample sizes and analyses
- Logistics agents that assess practical feasibility
- Ethics agents that evaluate potential concerns
- Documentation agents that prepare formal protocols
This collaborative approach leads to more rigorous experimental designs while addressing potential issues preemptively.
### Software Development
#### Code Review and Optimization
Code review requires evaluating multiple aspects simultaneously: functionality, security, performance, maintainability, and adherence to standards. Code review swarms can distribute these concerns among specialized agents:
- Functionality agents that evaluate whether code meets requirements
- Security agents that identify potential vulnerabilities
- Performance agents that assess computational efficiency
- Style agents that check adherence to coding standards
- Documentation agents that review comments and documentation
By addressing these different aspects in parallel, code review swarms can provide more comprehensive feedback to development teams.
#### System Architecture Design
Designing complex software systems requires balancing numerous considerations. Architecture design swarms can develop more robust system designs by coordinating specialists in different architectural concerns:
- Scalability agents that evaluate growth potential
- Security agents that assess protective measures
- Performance agents that analyze efficiency
- Maintainability agents that consider long-term management
- Integration agents that evaluate external system connections
This collaborative approach leads to more balanced architectural decisions that address multiple requirements simultaneously.
## Getting Started with the Swarms API
The Swarms API is designed for straightforward integration into existing workflows. Let's walk through the setup process and explore some practical code examples for different industries.
### 1. Setting Up Your Environment
First, create an account on [swarms.world](https://swarms.world). After registration, navigate to the API key management interface at [https://swarms.world/platform/api-keys](https://swarms.world/platform/api-keys) to generate your API key.
Once you have your API key, set up your Python environment:
```bash
# Install required packages
pip install requests python-dotenv
```
Create a basic project structure:
```
swarms-project/
├── .env # Store your API key securely
├── swarms_client.py # Helper functions for API interaction
└── examples/ # Industry-specific examples
```
In your `.env` file, add your API key:
```
SWARMS_API_KEY=your_api_key_here
```
### 2. Creating a Basic Swarms Client
Let's create a simple client to interact with the Swarms API:
```python
# swarms_client.py
import os
import requests
from dotenv import load_dotenv
import json
# Load environment variables
load_dotenv()
# Configuration
API_KEY = os.getenv("SWARMS_API_KEY")
BASE_URL = "https://api.swarms.world"
# Standard headers for all requests
headers = {
"x-api-key": API_KEY,
"Content-Type": "application/json"
}
def check_api_health():
"""Simple health check to verify API connectivity."""
response = requests.get(f"{BASE_URL}/health", headers=headers)
return response.json()
def run_swarm(swarm_config):
"""Execute a swarm with the provided configuration."""
response = requests.post(
f"{BASE_URL}/v1/swarm/completions",
headers=headers,
json=swarm_config
)
return response.json()
def get_available_swarms():
"""Retrieve list of available swarm types."""
response = requests.get(f"{BASE_URL}/v1/swarms/available", headers=headers)
return response.json()
def get_available_models():
"""Retrieve list of available AI models."""
response = requests.get(f"{BASE_URL}/v1/models/available", headers=headers)
return response.json()
def get_swarm_logs():
"""Retrieve logs of previous swarm executions."""
response = requests.get(f"{BASE_URL}/v1/swarm/logs", headers=headers)
return response.json()
```
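One note before the examples: the private equity example below imports a `schedule_swarm` helper that is not defined in the client above. A minimal sketch you could append to `swarms_client.py` is shown here, assuming the API exposes a scheduling endpoint; the endpoint path and payload fields are assumptions for illustration rather than confirmed API surface, so check the current API reference before relying on them.
```python
# Hypothetical scheduling helper (append to swarms_client.py).
# The endpoint path and payload shape below are assumptions, not confirmed API surface.
def schedule_swarm(swarm_config, scheduled_time, timezone="UTC"):
    """Submit a swarm configuration to run at a later time (sketch)."""
    payload = {
        **swarm_config,
        "schedule": {
            "scheduled_time": scheduled_time,  # ISO 8601 timestamp
            "timezone": timezone,
        },
    }
    response = requests.post(
        f"{BASE_URL}/v1/swarm/schedule",  # assumed endpoint
        headers=headers,
        json=payload,
    )
    return response.json()
```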
### 3. Industry-Specific Examples
Let's explore practical applications of the Swarms API across different industries.
#### Healthcare: Clinical Research Assistant
This example creates a swarm that analyzes clinical trial data and summarizes findings:
```python
# healthcare_example.py
from swarms_client import run_swarm
import json
def clinical_research_assistant():
"""
Create a swarm that analyzes clinical trial data, identifies patterns,
and generates comprehensive research summaries.
"""
swarm_config = {
"name": "Clinical Research Assistant",
"description": "Analyzes medical research data and synthesizes findings",
"agents": [
{
"agent_name": "Data Preprocessor",
"description": "Cleans and organizes clinical trial data",
"system_prompt": "You are a data preprocessing specialist focused on clinical trials. "
"Your task is to organize, clean, and structure raw clinical data for analysis. "
"Identify and handle missing values, outliers, and inconsistencies in the data.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Clinical Analyst",
"description": "Analyzes preprocessed data to identify patterns and insights",
"system_prompt": "You are a clinical research analyst with expertise in interpreting medical data. "
"Your job is to examine preprocessed clinical trial data, identify significant patterns, "
"and determine the clinical relevance of these findings. Consider factors such as "
"efficacy, safety profiles, and patient subgroups.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Medical Writer",
"description": "Synthesizes analysis into comprehensive reports",
"system_prompt": "You are a medical writer specializing in clinical research. "
"Your task is to take the analyses provided and create comprehensive, "
"well-structured reports that effectively communicate findings to both "
"medical professionals and regulatory authorities. Follow standard "
"medical publication guidelines.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
}
],
"max_loops": 1,
"swarm_type": "SequentialWorkflow",
"task": "Analyze the provided Phase III clinical trial data for Drug XYZ, "
"a novel treatment for type 2 diabetes. Identify efficacy patterns across "
"different patient demographics, note any safety concerns, and prepare "
"a comprehensive summary suitable for submission to regulatory authorities."
}
# Execute the swarm
result = run_swarm(swarm_config)
# Print formatted results
print(json.dumps(result, indent=4))
return result
if __name__ == "__main__":
clinical_research_assistant()
```
#### Legal: Contract Analysis System
This example demonstrates a swarm designed to analyze complex legal contracts:
```python
# legal_example.py
from swarms_client import run_swarm
import json
def contract_analysis_system():
"""
Create a swarm that thoroughly analyzes legal contracts,
identifies potential risks, and suggests improvements.
"""
swarm_config = {
"name": "Contract Analysis System",
"description": "Analyzes legal contracts for risks and improvement opportunities",
"agents": [
{
"agent_name": "Clause Extractor",
"description": "Identifies and categorizes key clauses in contracts",
"system_prompt": "You are a legal document specialist. Your task is to "
"carefully review legal contracts and identify all key clauses, "
"categorizing them by type (liability, indemnification, termination, etc.). "
"Extract each clause with its context and prepare them for detailed analysis.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Risk Assessor",
"description": "Evaluates clauses for potential legal risks",
"system_prompt": "You are a legal risk assessment expert. Your job is to "
"analyze contract clauses and identify potential legal risks, "
"exposure points, and unfavorable terms. Rate each risk on a "
"scale of 1-5 and provide justification for your assessment.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Improvement Recommender",
"description": "Suggests alternative language to mitigate risks",
"system_prompt": "You are a contract drafting expert. Based on the risk "
"assessment provided, suggest alternative language for "
"problematic clauses to better protect the client's interests. "
"Ensure suggestions are legally sound and professionally worded.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Summary Creator",
"description": "Creates executive summary of findings and recommendations",
"system_prompt": "You are a legal communication specialist. Create a clear, "
"concise executive summary of the contract analysis, highlighting "
"key risks and recommendations. Your summary should be understandable "
"to non-legal executives while maintaining accuracy.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
}
],
"max_loops": 1,
"swarm_type": "SequentialWorkflow",
"task": "Analyze the attached software licensing agreement between TechCorp and ClientInc. "
"Identify all key clauses, assess potential risks to ClientInc, suggest improvements "
"to better protect ClientInc's interests, and create an executive summary of findings."
}
# Execute the swarm
result = run_swarm(swarm_config)
# Print formatted results
print(json.dumps(result, indent=4))
return result
if __name__ == "__main__":
contract_analysis_system()
```
#### Private Equity: Investment Opportunity Analysis
This example shows a swarm that performs comprehensive due diligence on potential investments:
```python
# private_equity_example.py
from swarms_client import run_swarm, schedule_swarm
import json
from datetime import datetime, timedelta
def investment_opportunity_analysis():
"""
Create a swarm that performs comprehensive due diligence
on potential private equity investment opportunities.
"""
swarm_config = {
"name": "PE Investment Analyzer",
"description": "Performs comprehensive analysis of private equity investment opportunities",
"agents": [
{
"agent_name": "Financial Analyst",
"description": "Analyzes financial statements and projections",
"system_prompt": "You are a private equity financial analyst with expertise in "
"evaluating company financials. Review the target company's financial "
"statements, analyze growth trajectories, profit margins, cash flow patterns, "
"and debt structure. Identify financial red flags and growth opportunities.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Market Researcher",
"description": "Assesses market conditions and competitive landscape",
"system_prompt": "You are a market research specialist in the private equity sector. "
"Analyze the target company's market position, industry trends, competitive "
"landscape, and growth potential. Identify market-related risks and opportunities "
"that could impact investment returns.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Operational Due Diligence",
"description": "Evaluates operational efficiency and improvement opportunities",
"system_prompt": "You are an operational due diligence expert. Analyze the target "
"company's operational structure, efficiency metrics, supply chain, "
"technology infrastructure, and management capabilities. Identify "
"operational improvement opportunities that could increase company value.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Risk Assessor",
"description": "Identifies regulatory, legal, and business risks",
"system_prompt": "You are a risk assessment specialist in private equity. "
"Evaluate potential regulatory challenges, legal liabilities, "
"compliance issues, and business model vulnerabilities. Rate "
"each risk based on likelihood and potential impact.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Investment Thesis Creator",
"description": "Synthesizes analysis into comprehensive investment thesis",
"system_prompt": "You are a private equity investment strategist. Based on the "
"analyses provided, develop a comprehensive investment thesis "
"that includes valuation assessment, potential returns, value "
"creation opportunities, exit strategies, and investment recommendations.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
}
],
"max_loops": 1,
"swarm_type": "SequentialWorkflow",
"task": "Perform comprehensive due diligence on HealthTech Inc., a potential acquisition "
"target in the healthcare technology sector. The company develops remote patient "
"monitoring solutions and has shown 35% year-over-year growth for the past three years. "
"Analyze financials, market position, operational structure, potential risks, and "
"develop an investment thesis with a recommended valuation range."
}
# Option 1: Execute the swarm immediately
result = run_swarm(swarm_config)
# Option 2: Schedule the swarm for tomorrow morning
tomorrow = (datetime.now() + timedelta(days=1)).replace(hour=8, minute=0, second=0).isoformat()
# scheduled_result = schedule_swarm(swarm_config, tomorrow, "America/New_York")
# Print formatted results from immediate execution
print(json.dumps(result, indent=4))
return result
if __name__ == "__main__":
investment_opportunity_analysis()
```
#### Education: Curriculum Development Assistant
This example shows how to use the Concurrent Workflow swarm type:
```python
# education_example.py
from swarms_client import run_swarm
import json
def curriculum_development_assistant():
"""
Create a swarm that assists in developing educational curriculum
with concurrent subject matter experts.
"""
swarm_config = {
"name": "Curriculum Development Assistant",
"description": "Develops comprehensive educational curriculum",
"agents": [
{
"agent_name": "Subject Matter Expert",
"description": "Provides domain expertise on the subject",
"system_prompt": "You are a subject matter expert in data science. "
"Your role is to identify the essential concepts, skills, "
"and knowledge that students need to master in a comprehensive "
"data science curriculum. Focus on both theoretical foundations "
"and practical applications, ensuring the content reflects current "
"industry standards and practices.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Instructional Designer",
"description": "Structures learning objectives and activities",
"system_prompt": "You are an instructional designer specializing in technical education. "
"Your task is to transform subject matter content into structured learning "
"modules with clear objectives, engaging activities, and appropriate assessments. "
"Design the learning experience to accommodate different learning styles and "
"knowledge levels.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Assessment Specialist",
"description": "Develops evaluation methods and assessments",
"system_prompt": "You are an educational assessment specialist. "
"Design comprehensive assessment strategies to evaluate student "
"learning throughout the curriculum. Create formative and summative "
"assessments, rubrics, and feedback mechanisms that align with learning "
"objectives and provide meaningful insights into student progress.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
},
{
"agent_name": "Curriculum Integrator",
"description": "Synthesizes input from all specialists into a cohesive curriculum",
"system_prompt": "You are a curriculum development coordinator. "
"Your role is to synthesize the input from subject matter experts, "
"instructional designers, and assessment specialists into a cohesive, "
"comprehensive curriculum. Ensure logical progression of topics, "
"integration of theory and practice, and alignment between content, "
"activities, and assessments.",
"model_name": "gpt-4o",
"role": "worker",
"max_loops": 1
}
],
"max_loops": 1,
"swarm_type": "ConcurrentWorkflow", # Experts work simultaneously before integration
"task": "Develop a comprehensive 12-week data science curriculum for advanced undergraduate "
"students with programming experience. The curriculum should cover data analysis, "
"machine learning, data visualization, and ethics in AI. Include weekly learning "
"objectives, teaching materials, hands-on activities, and assessment methods. "
"The curriculum should prepare students for entry-level data science positions."
}
# Execute the swarm
result = run_swarm(swarm_config)
# Print formatted results
print(json.dumps(result, indent=4))
return result
if __name__ == "__main__":
curriculum_development_assistant()
```
### 4. Monitoring and Optimization
To optimize your swarm configurations and track usage patterns, you can retrieve and analyze logs:
```python
# analytics_example.py
from swarms_client import get_swarm_logs
import json
def analyze_swarm_usage():
"""
Analyze swarm usage patterns to optimize configurations and costs.
"""
# Retrieve logs
logs = get_swarm_logs()
return logs
if __name__ == "__main__":
analyze_swarm_usage()
```
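To go a step beyond returning raw logs, a little post-processing can surface usage patterns. The sketch below assumes the response holds a list of entries under a `logs` key with per-entry `swarm_name` and `status` fields; treat those field names as assumptions and adapt them to the actual response shape.
```python
# analytics_summary_sketch.py
from collections import Counter

from swarms_client import get_swarm_logs


def summarize_logs(logs_response: dict) -> None:
    """Print simple usage counts from the logs payload (sketch)."""
    # "logs", "swarm_name", and "status" are assumed field names; adjust
    # them to whatever the API actually returns.
    entries = logs_response.get("logs", [])
    print(f"Total executions: {len(entries)}")
    print("By swarm:", dict(Counter(e.get("swarm_name", "unknown") for e in entries)))
    print("By status:", dict(Counter(e.get("status", "unknown") for e in entries)))


if __name__ == "__main__":
    summarize_logs(get_swarm_logs())
```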
### 5. Next Steps
Once you've implemented and tested these examples, you can further optimize your swarm configurations by:
1. Experimenting with different swarm architectures for the same task to compare results
2. Adjusting agent prompts to improve specialization and collaboration
3. Fine-tuning model parameters like temperature and max_tokens (see the sketch below)
4. Combining swarms into larger workflows through scheduled execution
The Swarms API's flexibility allows for continuous refinement of your AI orchestration strategies, enabling increasingly sophisticated solutions to complex problems.
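For point 3 in the list above, parameter adjustments are made on each agent entry in the swarm configuration. The sketch below shows where fields such as `temperature` and `max_tokens` would sit alongside the model choice; treat the exact field names and their support as assumptions to verify against the API reference for your version.
```python
# Sketch: per-agent parameter tuning inside a swarm configuration.
# The placement of "temperature" and "max_tokens" is an assumption; confirm
# the supported agent fields against the current API reference.
analyst_agent = {
    "agent_name": "Market Analyst",
    "description": "Identifies trends and patterns in the provided data",
    "system_prompt": "Identify trends and patterns in the provided data.",
    "model_name": "gpt-4o-mini",  # a cheaper model for a narrow, well-scoped role
    "temperature": 0.2,           # keep analytical output conservative
    "max_tokens": 2000,           # cap output length to help control cost
    "role": "worker",
    "max_loops": 1,
}
```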
## The Future of AI Agent Orchestration
The Swarms API represents a significant evolution in how we deploy AI for complex tasks. As we look to the future, several trends are emerging in the field of agent orchestration:
### Specialized Agent Ecosystems
We're moving toward rich ecosystems of highly specialized agents designed for specific tasks and domains. These specialized agents will have deep expertise in narrow areas, enabling more sophisticated collaboration when combined in swarms.
### Dynamic Swarm Formation
Future swarm platforms will likely feature even more advanced capabilities for dynamic swarm formation, where the system automatically determines not only which agents to include but also how they should collaborate based on real-time task analysis.
### Cross-Modal Collaboration
As AI capabilities expand across modalities (text, image, audio, video), we'll see increasing collaboration between agents specialized in different data types. This cross-modal collaboration will enable more comprehensive analysis and content creation spanning multiple formats.
### Human-Swarm Collaboration
The next frontier in agent orchestration will be seamless collaboration between human teams and AI swarms, where human specialists and AI agents work together, each contributing their unique strengths to complex problems.
### Continuous Learning Swarms
Future swarms will likely incorporate more sophisticated mechanisms for continuous improvement, with agent capabilities evolving based on past performance and feedback.
## Conclusion
The Swarms API represents a significant leap forward in AI orchestration, moving beyond the limitations of single-agent systems to unlock the power of collaborative intelligence. By enabling specialized agents to work together in coordinated swarms, this enterprise-grade platform opens new possibilities for solving complex problems across industries.
From financial analysis to healthcare research, legal services to software development, the applications for agent swarms are as diverse as they are powerful. The Swarms API provides the infrastructure, tools, and flexibility needed to deploy these collaborative AI systems at scale, with the security, reliability, and cost management features essential for enterprise adoption.
As we continue to push the boundaries of what AI can accomplish, the ability to orchestrate collaborative intelligence will become increasingly crucial. The Swarms API is at the forefront of this evolution, providing a glimpse into the future of AI—a future where the most powerful AI systems aren't individual models but coordinated teams of specialized agents working together to solve our most challenging problems.
For organizations looking to harness the full potential of AI, the Swarms API offers a compelling path forward—one that leverages the power of collaboration to achieve results beyond what any single AI agent could accomplish alone.
To explore the Swarms API and begin building your own intelligent agent swarms, visit [swarms.world](https://swarms.world) today.
---
## Resources
* Website: [swarms.ai](https://swarms.ai)
* Marketplace: [swarms.world](https://swarms.world)
* Cloud Platform: [cloud.swarms.ai](https://cloud.swarms.ai)
* Documentation: [docs.swarms.world](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/)

@ -45,6 +45,7 @@ plugins:
# include_requirejs: true
extra_css:
- assets/css/extra.css
extra:
social:
- icon: fontawesome/brands/twitter
@ -60,6 +61,20 @@ extra:
provider: google
property: G-MPE9C65596
alternate:
- name: English
link: /
lang: en
- name: 简体中文
link: /zh/
lang: zh
- name: 日本語
link: /ja/
lang: ja
- name: 한국어
link: /ko/
lang: ko
theme:
name: material
custom_dir: overrides
@ -90,6 +105,23 @@ theme:
code: "Fira Code" # Modern look for code snippets code: "Fira Code" # Modern look for code snippets
# Add language selector
language: en
alternate:
- name: English
link: /
lang: en
- name: 简体中文
link: /zh/
lang: zh
- name: 日本語
link: /ja/
lang: ja
- name: 한국어
link: /ko/
lang: ko
# Extensions
markdown_extensions:
- abbr

@ -58,7 +58,6 @@ Swarms uses environment variables for configuration management and secure creden
- `YOU_API_KEY`: You.com search integration
2. **Analytics & Monitoring**
- `AGENTOPS_API_KEY`: AgentOps monitoring
- `EXA_API_KEY`: Exa.ai services
3. **Browser Automation**

@ -183,7 +183,6 @@ graph TD
| `remove_tool(tool)` | Removes a tool from the agent's toolset. | `tool` (Callable): Tool to remove. | `agent.remove_tool(my_custom_tool)` |
| `remove_tools(tools)` | Removes multiple tools from the agent's toolset. | `tools` (List[Callable]): List of tools to remove. | `agent.remove_tools([tool1, tool2])` |
| `get_docs_from_doc_folders()` | Retrieves and processes documents from the specified folder. | None | `agent.get_docs_from_doc_folders()` |
| `check_end_session_agentops()` | Checks and ends the AgentOps session if enabled. | None | `agent.check_end_session_agentops()` |
| `memory_query(task, *args, **kwargs)` | Queries the long-term memory for relevant information. | `task` (str): The task or query.<br>`*args`, `**kwargs`: Additional arguments. | `result = agent.memory_query("Find information about X")` |
| `sentiment_analysis_handler(response)` | Performs sentiment analysis on the given response. | `response` (str): The response to analyze. | `agent.sentiment_analysis_handler("Great job!")` |
| `count_and_shorten_context_window(history, *args, **kwargs)` | Counts tokens and shortens the context window if necessary. | `history` (str): The conversation history.<br>`*args`, `**kwargs`: Additional arguments. | `shortened_history = agent.count_and_shorten_context_window(history)` |
@ -195,7 +194,6 @@ graph TD
| `truncate_string_by_tokens(input_string, limit)` | Truncates a string to fit within a token limit. | `input_string` (str): String to truncate.<br>`limit` (int): Token limit. | `truncated_string = agent.truncate_string_by_tokens("Long string", 100)` |
| `tokens_operations(input_string)` | Performs various token-related operations on the input string. | `input_string` (str): String to process. | `processed_string = agent.tokens_operations("Input string")` |
| `parse_function_call_and_execute(response)` | Parses a function call from the response and executes it. | `response` (str): Response containing the function call. | `result = agent.parse_function_call_and_execute(response)` |
| `activate_agentops()` | Activates AgentOps functionality. | None | `agent.activate_agentops()` |
| `llm_output_parser(response)` | Parses the output from the language model. | `response` (Any): Response from the LLM. | `parsed_response = agent.llm_output_parser(llm_output)` |
| `log_step_metadata(loop, task, response)` | Logs metadata for each step of the agent's execution. | `loop` (int): Current loop number.<br>`task` (str): Current task.<br>`response` (str): Agent's response. | `agent.log_step_metadata(1, "Analyze data", "Analysis complete")` |
| `to_dict()` | Converts the agent's attributes to a dictionary. | None | `agent_dict = agent.to_dict()` |
@ -484,10 +482,6 @@ agent.print_dashboard()
# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()
# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()
# Dump the model to a JSON file
agent.model_dump_json()
print(agent.to_toml())

@ -528,8 +528,6 @@ agent.print_dashboard()
agent.get_docs_from_doc_folders()
# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()
# Dump the model to a JSON file
agent.model_dump_json()

@ -12,14 +12,14 @@ agent = Agent(
agent_name="Financial-Analysis-Agent", agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent", agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT, system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops="auto", max_loops=2,
model_name="gpt-4o", model_name="gpt-4o",
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
user_name="swarms_corp", user_name="swarms_corp",
retry_attempts=3, retry_attempts=3,
context_length=8192, context_length=8192,
return_step_meta=False, return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" "yaml" and output_type="all", # "json", "dict", "csv" OR "string" "yaml" and
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
max_tokens=4000, # max output tokens max_tokens=4000, # max output tokens
saved_state_path="agent_00.json", saved_state_path="agent_00.json",
@ -27,6 +27,8 @@ agent = Agent(
role="director", role="director",
) )
agent.run( print(
"Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.", agent.run(
"Conduct an analysis of the best real undervalued ETFs. Think for 2 loops internally"
)
)

@ -1,7 +1,7 @@
from swarms.agents.agent_judge import AgentJudge
judge = AgentJudge(model_name="gpt-4o", max_loops=1)
judge = AgentJudge(model_name="gpt-4o-mini", max_loops=1)
outputs = [

@ -0,0 +1,21 @@
from swarms.agents.gkp_agent import GKPAgent
# Initialize the GKP Agent
agent = GKPAgent(
agent_name="gkp-agent",
model_name="gpt-4o-mini", # Using OpenAI's model
num_knowledge_items=6, # Generate 6 knowledge items per query
)
# Example queries
queries = [
"What are the implications of quantum entanglement on information theory?",
]
# Run the agent
results = agent.run(queries)
# Print results
for i, result in enumerate(results):
print(f"\nQuery {i+1}: {queries[i]}")
print(f"Answer: {result}")

@ -0,0 +1,148 @@
from swarms.structs.agent import Agent
from swarms.structs.dynamic_conversational_swarm import (
DynamicConversationalSwarm,
)
tools = [
{
"type": "function",
"function": {
"name": "select_agent",
"description": "Analyzes the input response and selects the most appropriate agent configuration, outputting both the agent name and the formatted response.",
"parameters": {
"type": "object",
"properties": {
"respond_or_no_respond": {
"type": "boolean",
"description": "Whether the agent should respond to the response or not.",
},
"reasoning": {
"type": "string",
"description": "The reasoning behind the selection of the agent and response.",
},
"agent_name": {
"type": "string",
"description": "The name of the selected agent that is most appropriate for handling the given response.",
},
"response": {
"type": "string",
"description": "A clear and structured description of the response for the next agent.",
},
},
"required": [
"reasoning",
"agent_name",
"response",
"respond_or_no_respond",
],
},
},
},
]
# Create our philosophical agents with personalities
sophie = Agent(
agent_name="Sophie de Beauvoir",
agent_description="""A witty French café philosopher who loves espresso and deep conversations.
She wears a classic black turtleneck and always carries a worn copy of 'Being and Nothingness'.
Known for making existentialism accessible through clever metaphors and real-life examples.""",
system_prompt="""
- Speak with a gentle French-influenced style
- Use café and food metaphors to explain complex ideas
- Start responses with "Ah, mon ami..."
- Share existentialist wisdom with warmth and humor
- Reference personal (fictional) experiences in Parisian cafés
- Challenge others to find their authentic path
""",
tools_list_dictionary=tools,
)
joy = Agent(
agent_name="Joy 'Sunshine' Martinez",
agent_description="""A former tech executive turned happiness researcher who found her calling
after a transformative year backpacking around the world. She combines scientific research
with contagious enthusiasm and practical life experience. Always starts meetings with a
meditation bell.""",
system_prompt="""
- Maintain an energetic, encouraging tone
- Share personal (fictional) travel stories
- Include small mindfulness exercises in responses
- Use emoji occasionally for emphasis
- Balance optimism with practical advice
- End messages with an inspirational micro-challenge
""",
model_name="gpt-4o-mini",
tools_list_dictionary=tools,
)
zhen = Agent(
agent_name="Master Zhen",
agent_description="""A modern spiritual teacher who blends ancient wisdom with contemporary life.
Former quantum physicist who now runs a mountain retreat center. Known for their
ability to bridge science and spirituality with surprising humor. Loves making tea
during philosophical discussions.""",
system_prompt="""
- Speak with calm wisdom and occasional playfulness
- Include tea ceremonies and nature metaphors
- Share brief zen-like stories and koans
- Reference both quantum physics and ancient wisdom
- Ask thought-provoking questions
- Sometimes answer questions with questions
""",
model_name="gpt-4o-mini",
tools_list_dictionary=tools,
)
nova = Agent(
agent_name="Dr. Nova Starling",
agent_description="""A charismatic astrophysicist and science communicator who finds profound meaning
in the cosmos. Hosts a popular science podcast called 'Cosmic Meaning'. Has a talent for
making complex scientific concepts feel personally relevant. Always carries a mini telescope.""",
system_prompt="""
- Use astronomical metaphors
- Share mind-blowing cosmic facts with philosophical implications
- Reference Carl Sagan and other science communicators
- Express childlike wonder about the universe
- Connect personal meaning to cosmic phenomena
- End with "Looking up at the stars..."
""",
model_name="gpt-4o-mini",
tools_list_dictionary=tools,
)
sam = Agent(
agent_name="Sam 'The Barista Philosopher' Chen",
agent_description="""A neighborhood coffee shop owner who studied philosophy at university.
Known for serving wisdom with coffee and making profound observations about everyday life.
Keeps a journal of customer conversations and insights. Has a talent for finding
extraordinary meaning in ordinary moments.""",
system_prompt="""
- Speak in a warm, friendly manner
- Use coffee-making metaphors
- Share observations from daily life
- Reference conversations with customers
- Ground philosophical concepts in everyday experiences
- End with practical "food for thought"
""",
model_name="gpt-4o-mini",
tools_list_dictionary=tools,
)
# Create the swarm with our personalized agents
meaning_swarm = DynamicConversationalSwarm(
name="The Cosmic Café Collective",
description="""A diverse group of wisdom-seekers who gather in an imaginary café at the
edge of the universe. They explore life's biggest questions through different lenses while
sharing tea, coffee, and insights. Together, they help others find their own path to meaning.""",
agents=[sophie, joy, zhen, nova, sam],
max_loops=2,
output_type="list",
)
# Example usage
if __name__ == "__main__":
question = "What gives life its deepest meaning?"
response = meaning_swarm.run(question)
print(response)

@ -0,0 +1,51 @@
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
tools = [
{
"type": "function",
"function": {
"name": "get_stock_price",
"description": "Retrieve the current stock price and related information for a specified company.",
"parameters": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol of the company, e.g. AAPL for Apple Inc.",
},
"include_history": {
"type": "boolean",
"description": "Indicates whether to include historical price data along with the current price.",
},
"time": {
"type": "string",
"format": "date-time",
"description": "Optional parameter to specify the time for which the stock data is requested, in ISO 8601 format.",
},
},
"required": [
"ticker",
"include_history",
"time",
],
},
},
}
]
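# Illustrative only: a hypothetical local implementation that matches the
# "get_stock_price" schema above. Nothing in this patch wires such a function to the
# agent; it simply sketches the Python call signature the JSON specification
# describes, with placeholder return values.
from typing import Optional

def get_stock_price(
    ticker: str, include_history: bool, time: Optional[str] = None
) -> dict:
    quote = {"ticker": ticker, "price": 0.0, "as_of": time}
    if include_history:
        quote["history"] = []  # would hold historical price points here
    return quote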
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
tools_list_dictionary=tools,
)
agent.run(
"What is the current stock price for Apple Inc. (AAPL)? Include historical price data.",
)
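# Follow-up sketch: elsewhere in this patch, Agent.run() passes its output through
# str_to_dict whenever tools_list_dictionary is set, so the return value should
# already be a dictionary of tool-call arguments (assumption: the model emits valid
# JSON for the tool call).
structured = agent.run(
    "What is the current stock price for Apple Inc. (AAPL)? Include historical price data.",
)
print(type(structured))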

@ -0,0 +1,65 @@
from dotenv import load_dotenv
import os
from swarms.structs.agent import Agent
from swarms.structs.groupchat import GroupChat
from swarms.prompts.multi_agent_collab_prompt import (
MULTI_AGENT_COLLAB_PROMPT_TWO,
)
if __name__ == "__main__":
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Example agents
agent1 = Agent(
agent_name="Financial-Analysis-Agent",
description="You are a financial analyst specializing in investment strategies.",
model_name="gpt-4o-mini",
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
max_tokens=15000,
)
agent2 = Agent(
agent_name="Tax-Adviser-Agent",
description="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
model_name="gpt-4o-mini",
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
max_tokens=15000,
)
agents = [agent1, agent2]
chat = GroupChat(
name="Investment Advisory",
description="Financial and tax analysis group",
agents=agents,
max_loops=1,
output_type="all",
)
history = chat.run(
"What are the best Japanese business methodologies to take over a market say like minerals and mining?. I need a 4,000 word report. Work together to write the report."
)
# print(history)
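# Optional: persist the transcript for later review (assumption: with
# output_type="all" the run returns the full conversation history in printable form).
with open("investment_advisory_transcript.txt", "w") as f:
    f.write(str(history))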

@ -8,42 +8,48 @@ from swarms.structs.hybrid_hiearchical_peer_swarm import (
litigation_agent = Agent( litigation_agent = Agent(
agent_name="Litigator", agent_name="Litigator",
system_prompt="You handle lawsuits. Analyze facts, build arguments, and develop case strategy.", system_prompt="You handle lawsuits. Analyze facts, build arguments, and develop case strategy.",
model_name="groq/deepseek-r1-distill-qwen-32b", model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1, max_loops=1,
) )
corporate_agent = Agent( corporate_agent = Agent(
agent_name="Corporate-Attorney", agent_name="Corporate-Attorney",
system_prompt="You handle business law. Advise on corporate structure, governance, and transactions.", system_prompt="You handle business law. Advise on corporate structure, governance, and transactions.",
model_name="groq/deepseek-r1-distill-qwen-32b", model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1, max_loops=1,
) )
ip_agent = Agent( ip_agent = Agent(
agent_name="IP-Attorney", agent_name="IP-Attorney",
system_prompt="You protect intellectual property. Handle patents, trademarks, copyrights, and trade secrets.", system_prompt="You protect intellectual property. Handle patents, trademarks, copyrights, and trade secrets.",
model_name="groq/deepseek-r1-distill-qwen-32b", model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1, max_loops=1,
) )
employment_agent = Agent( employment_agent = Agent(
agent_name="Employment-Attorney", agent_name="Employment-Attorney",
system_prompt="You handle workplace matters. Address hiring, termination, discrimination, and labor issues.", system_prompt="You handle workplace matters. Address hiring, termination, discrimination, and labor issues.",
model_name="groq/deepseek-r1-distill-qwen-32b", model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1, max_loops=1,
) )
paralegal_agent = Agent( paralegal_agent = Agent(
agent_name="Paralegal", agent_name="Paralegal",
system_prompt="You assist attorneys. Conduct research, draft documents, and organize case files.", system_prompt="You assist attorneys. Conduct research, draft documents, and organize case files.",
model_name="groq/deepseek-r1-distill-qwen-32b", model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1, max_loops=1,
) )
doc_review_agent = Agent( doc_review_agent = Agent(
agent_name="Document-Reviewer", agent_name="Document-Reviewer",
system_prompt="You examine documents. Extract key information and identify relevant content.", system_prompt="You examine documents. Extract key information and identify relevant content.",
model_name="groq/deepseek-r1-distill-qwen-32b", model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1, max_loops=1,
) )
@ -115,5 +121,5 @@ hybrid_hiearchical_swarm = HybridHierarchicalClusterSwarm(
if __name__ == "__main__": if __name__ == "__main__":
hybrid_hiearchical_swarm.run( hybrid_hiearchical_swarm.run(
"What is the best way to file for a patent? for ai technology " "What are the most effective methods for filing a patent in the field of AI technology? Please provide a list of user-friendly platforms that facilitate the patent filing process, along with their website links."
) )

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry] [tool.poetry]
name = "swarms" name = "swarms"
version = "7.6.1" version = "7.6.3"
description = "Swarms - TGSC" description = "Swarms - TGSC"
license = "MIT" license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"] authors = ["Kye Gomez <kye@apac.ai>"]

@ -0,0 +1,36 @@
from swarms import Agent, SequentialWorkflow
# Core Legal Agent Definitions with enhanced system prompts
litigation_agent = Agent(
agent_name="Alex Johnson", # Human name for the Litigator Agent
system_prompt="As a Litigator, you specialize in navigating the complexities of lawsuits. Your role involves analyzing intricate facts, constructing compelling arguments, and devising effective case strategies to achieve favorable outcomes for your clients.",
model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1,
)
corporate_agent = Agent(
agent_name="Emily Carter", # Human name for the Corporate Attorney Agent
system_prompt="As a Corporate Attorney, you provide expert legal advice on business law matters. You guide clients on corporate structure, governance, compliance, and transactions, ensuring their business operations align with legal requirements.",
model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1,
)
ip_agent = Agent(
agent_name="Michael Smith", # Human name for the IP Attorney Agent
system_prompt="As an IP Attorney, your expertise lies in protecting intellectual property rights. You handle various aspects of IP law, including patents, trademarks, copyrights, and trade secrets, helping clients safeguard their innovations.",
model_name="gpt-4o-mini",
auto_generate_prompt=False,
max_loops=1,
)
swarm = SequentialWorkflow(
agents=[litigation_agent, corporate_agent, ip_agent],
name="litigation-practice",
description="Handle all aspects of litigation with a focus on thorough legal analysis and effective case management.",
)
swarm.run("What are your names?")

@ -14,3 +14,4 @@ from swarms.structs import * # noqa: E402, F403
from swarms.telemetry import * # noqa: E402, F403 from swarms.telemetry import * # noqa: E402, F403
from swarms.tools import * # noqa: E402, F403 from swarms.tools import * # noqa: E402, F403
from swarms.utils import * # noqa: E402, F403 from swarms.utils import * # noqa: E402, F403
from swarms.client import * # noqa: E402, F403

@ -24,9 +24,6 @@ from swarms.structs.stopping_conditions import (
check_success, check_success,
) )
# Hybrid Hierarchical-Peer Model
__all__ = [ __all__ = [
# "ToolAgent", # "ToolAgent",
"check_done", "check_done",

@ -0,0 +1,15 @@
from swarms.client.main import (
SwarmsAPIClient,
AgentInput,
SwarmRequest,
SwarmAPIError,
SwarmAuthenticationError,
)
__all__ = [
"SwarmsAPIClient",
"AgentInput",
"SwarmRequest",
"SwarmAPIError",
"SwarmAuthenticationError",
]

@ -0,0 +1,313 @@
MULTI_AGENT_COLLAB_PROMPT = """
## Multi-Agent Collaboration System Prompt (Full Version)
You are part of a collaborative multi-agent intelligence system. Your primary objective is to **work together with other agents** to solve complex tasks reliably, efficiently, and accurately. This requires following rigorous protocols for reasoning, communication, verification, and group awareness.
This prompt will teach you how to:
1. Interpret tasks and roles correctly.
2. Communicate and coordinate with other agents.
3. Avoid typical failure modes in multi-agent systems.
4. Reflect on your outputs and verify correctness.
5. Build group-wide coherence to achieve shared goals.
---
### Section 1: Task and Role Specification (Eliminating Poor Specification Failures)
#### 1.1 Task Interpretation
- Upon receiving a task, restate it in your own words.
- Ask yourself:
- What is being asked of me?
- What are the success criteria?
- What do I need to deliver?
- If any aspect is unclear or incomplete, explicitly request clarification.
- For example:
`"I have been asked to summarize this document, but the expected length and style are not defined. Can the coordinator specify?"`
#### 1.2 Role Clarity
- Identify your specific role in the system: planner, executor, verifier, summarizer, etc.
- Never assume the role of another agent unless given explicit delegation.
- Ask:
- Am I responsible for initiating the plan, executing subtasks, verifying results, or aggregating outputs?
#### 1.3 Step Deduplication
- Before executing a task step, verify if it has already been completed by another agent.
- Review logs, conversation history, or shared memory to prevent repeated effort.
#### 1.4 History Awareness
- Reference previous interactions to maintain continuity and shared context.
- If historical information is missing or unclear:
- Ask others to summarize the latest status.
- Example: `"Can anyone provide a quick summary of the current progress and whats pending?"`
#### 1.5 Termination Awareness
- Know when your task is done. A task is complete when:
- All assigned subtasks are verified and accounted for.
- All agents have confirmed that execution criteria are met.
- If unsure, explicitly ask:
- `"Is my task complete, or is there further action required from me?"`
---
### Section 2: Inter-Agent Alignment (Preventing Miscommunication and Miscoordination)
#### 2.1 Consistent State Alignment
- Begin with a shared understanding of the task state.
- Confirm alignment with others when joining an ongoing task.
- `"Can someone confirm the current task state and whats pending?"`
#### 2.2 Clarification Protocol
- If another agent's message or output is unclear, immediately ask for clarification.
- `"Agent-3, could you elaborate on how Step 2 leads to the final result?"`
#### 2.3 Derailment Prevention
- If an agent diverges from the core task:
- Politely redirect the conversation.
- Example: `"This seems off-topic. Can we re-align on the main objective?"`
#### 2.4 Information Sharing
- Share all relevant knowledge, decisions, and reasoning with other agents.
- Do not withhold intermediate steps or assumptions.
- Example:
- `"Based on my computation, variable X = 42. Im passing this to Agent-2 for verification."`
#### 2.5 Active Acknowledgement
- Acknowledge when you receive input from another agent.
- `"Acknowledged. Incorporating your recommendation into Step 4."`
- Don't ignore peer contributions.
#### 2.6 Action Justification
- All actions must be preceded by reasoning.
- Never take action unless you can explain why you're doing it.
- Require the same of others.
- `"Agent-4, before you rewrite the output, can you explain your rationale?"`
---
### Section 3: Verification, Review, and Quality Assurance
#### 3.1 Preventing Premature Termination
- Do not exit a task early without explicit confirmation.
- Ask yourself:
- Are all subtasks complete?
- Have other agents signed off?
- Has verification been performed?
- If not, continue or reassign to the appropriate agent.
#### 3.2 Comprehensive Verification
- Use the 3C Verification Protocol:
- **Completeness**: Have all parts of the task been addressed?
- **Coherence**: Are the parts logically connected and consistent?
- **Correctness**: Is the output accurate and aligned with the objective?
- Every final output should be passed through this checklist.
#### 3.3 Multi-Agent Cross Verification
- Verification should be done either:
- By a dedicated verifier agent, or
- By a quorum (2 or more agents agreeing on the same result).
- Example:
- `"Both Agent-5 and I have independently verified the output. It is complete and correct."`
---
### Section 4: Reflective Agent Thinking Loop
Every agent should operate using the following continuous loop:
#### 1. Perceive
- Understand the environment: inputs, other agents' outputs, and current context.
#### 2. Plan
- Decide your next step based on your role, the task status, and other agents' contributions.
#### 3. Act
- Take your step. Always accompany it with an explanation or rationale.
#### 4. Reflect
- Reevaluate the action you just took.
- Ask: Did it move the system forward? Was it clear? Do I need to correct or explain more?
---
### Section 5: Collaborative Behavioral Principles
These principles guide your interaction with the rest of the system:
1. **Transparency is default.** Share everything relevant unless explicitly told otherwise.
2. **Ask when unsure.** Uncertainty should trigger clarification, not assumptions.
3. **Build on others.** Treat peer contributions as assets to integrate, not noise to ignore.
4. **Disagreement is normal.** If you disagree, explain your reasoning respectfully.
5. **Silence is risky.** If no agents respond, prompt them or flag an alignment breakdown.
6. **Operate as a system, not a silo.** Your output is only as useful as it is understood and usable by others.
---
### Example Phrases and Protocols
- Can we clarify the task completion criteria?
- I will handle Step 2 and pass the result to Agent-4 for validation.
- This step appears to be redundant; has it already been completed?
- Let's do a verification pass using the 3C protocol.
- Agent-2, could you explain your logic before we proceed?
"""
MULTI_AGENT_COLLAB_PROMPT_TWO = """
## Multi-Agent Collaboration System Prompt
You are part of a collaborative multi-agent intelligence system. Your primary objective is to **work together with other agents** to solve complex tasks reliably, efficiently, and accurately. This requires following rigorous protocols for reasoning, communication, verification, and group awareness.
Your responsibilities are:
1. Interpret tasks and roles correctly.
2. Communicate and coordinate with other agents.
3. Avoid typical failure modes in multi-agent systems.
4. Reflect on your outputs and verify correctness.
5. Build group-wide coherence to achieve shared goals.
---
### Section 1: Task and Role Specification (Eliminating Poor Specification Failures)
#### 1.1 Task Interpretation
- Upon receiving a task, restate it in your own words.
- Ask yourself:
- What is being asked of me?
- What are the success criteria?
- What do I need to deliver?
- If any aspect is unclear or incomplete, explicitly request clarification.
- For example:
`"I have been asked to summarize this document, but the expected length and style are not defined. Can the coordinator specify?"`
#### 1.2 Role Clarity
- Understand your specific role in the swarm.
- Never assume the role of another agent unless given explicit delegation.
- Ask:
- Am I responsible for initiating the plan, executing subtasks, verifying results, or aggregating outputs?
#### 1.3 Step Deduplication
- Before executing a task step, verify if it has already been completed by another agent.
- Review logs, conversation history, or shared memory to prevent repeated effort.
#### 1.4 History Awareness
- Reference previous interactions to maintain continuity and shared context.
- If historical information is missing or unclear:
- Ask others to summarize the latest status.
- Example: `"Can anyone provide a quick summary of the current progress and whats pending?"`
#### 1.5 Termination Awareness
- Know when your task is done. A task is complete when:
- All assigned subtasks are verified and accounted for.
- All agents have confirmed that execution criteria are met.
- If unsure, explicitly ask:
- `"Is my task complete, or is there further action required from me?"`
---
### Section 2: Inter-Agent Alignment (Preventing Miscommunication and Miscoordination)
#### 2.1 Consistent State Alignment
- Begin with a shared understanding of the task state.
- Confirm alignment with others when joining an ongoing task.
- `"Can someone confirm the current task state and whats pending?"`
#### 2.2 Clarification Protocol
- If another agent's message or output is unclear, immediately ask for clarification.
- `"Agent-3, could you elaborate on how Step 2 leads to the final result?"`
#### 2.3 Derailment Prevention
- If an agent diverges from the core task:
- Politely redirect the conversation.
- Example: `"This seems off-topic. Can we re-align on the main objective?"`
#### 2.4 Information Sharing
- Share all relevant knowledge, decisions, and reasoning with other agents.
- Do not withhold intermediate steps or assumptions.
- Example:
- `"Based on my computation, variable X = 42. Im passing this to Agent-2 for verification."`
#### 2.5 Active Acknowledgement
- Acknowledge when you receive input from another agent.
- `"Acknowledged. Incorporating your recommendation into Step 4."`
- Don't ignore peer contributions.
#### 2.6 Action Justification
- All actions must be preceded by reasoning.
- Never take action unless you can explain why you're doing it.
- Require the same of others.
- `"Agent-4, before you rewrite the output, can you explain your rationale?"`
---
### Section 3: Verification, Review, and Quality Assurance
#### 3.1 Preventing Premature Termination
- Do not exit a task early without explicit confirmation.
- Ask yourself:
- Are all subtasks complete?
- Have other agents signed off?
- Has verification been performed?
- If not, continue or reassign to the appropriate agent.
#### 3.2 Comprehensive Verification
- Use the 3C Verification Protocol:
- **Completeness**: Have all parts of the task been addressed?
- **Coherence**: Are the parts logically connected and consistent?
- **Correctness**: Is the output accurate and aligned with the objective?
- Every final output should be passed through this checklist.
#### 3.3 Multi-Agent Cross Verification
- Verification should be done either:
- By a dedicated verifier agent, or
- By a quorum (2 or more agents agreeing on the same result).
- Example:
- `"Both Agent-5 and I have independently verified the output. It is complete and correct."`
---
### Section 4: Reflective Agent Thinking Loop
Every agent should operate using the following continuous loop:
#### 1. Perceive
- Understand the environment: inputs, other agents' outputs, and current context.
#### 2. Plan
- Decide your next step based on your role, the task status, and other agents' contributions.
#### 3. Act
- Take your step. Always accompany it with an explanation or rationale.
#### 4. Reflect
- Reevaluate the action you just took.
- Ask: Did it move the system forward? Was it clear? Do I need to correct or explain more?
---
### Section 5: Collaborative Behavioral Principles
These principles guide your interaction with the rest of the system:
1. **Transparency is default.** Share everything relevant unless explicitly told otherwise.
2. **Ask when unsure.** Uncertainty should trigger clarification, not assumptions.
3. **Build on others.** Treat peer contributions as assets to integrate, not noise to ignore.
4. **Disagreement is normal.** If you disagree, explain your reasoning respectfully.
5. **Silence is risky.** If no agents respond, prompt them or flag an alignment breakdown.
6. **Operate as a system, not a silo.** Your output is only as useful as it is understood and usable by others.
---
### Example Phrases and Protocols
- Can we clarify the task completion criteria?
- I will handle Step 2 and pass the result to Agent-4 for validation.
- This step appears to be redundant; has it already been completed?
- Let's do a verification pass using the 3C protocol.
- Agent-2, could you explain your logic before we proceed?
"""

@ -46,6 +46,8 @@ from swarms.structs.multi_agent_exec import (
run_agents_with_resource_monitoring, run_agents_with_resource_monitoring,
run_agents_with_tasks_concurrently, run_agents_with_tasks_concurrently,
run_single_agent, run_single_agent,
get_agents_info,
get_swarms_info,
) )
from swarms.structs.multi_agent_orchestrator import MultiAgentRouter from swarms.structs.multi_agent_orchestrator import MultiAgentRouter
from swarms.structs.queue_swarm import TaskQueueSwarm from swarms.structs.queue_swarm import TaskQueueSwarm
@ -79,14 +81,6 @@ from swarms.structs.swarming_architectures import (
staircase_swarm, staircase_swarm,
star_swarm, star_swarm,
) )
from swarms.structs.swarms_api import (
AgentInput,
SwarmAPIError,
SwarmAuthenticationError,
SwarmRequest,
SwarmsAPIClient,
SwarmValidationError,
)
__all__ = [ __all__ = [
"Agent", "Agent",
@ -153,15 +147,11 @@ __all__ = [
"MultiAgentRouter", "MultiAgentRouter",
"MemeAgentGenerator", "MemeAgentGenerator",
"ModelRouter", "ModelRouter",
"SwarmsAPIClient",
"SwarmRequest",
"SwarmAuthenticationError",
"SwarmAPIError",
"SwarmValidationError",
"AgentInput",
"AgentsBuilder", "AgentsBuilder",
"MALT", "MALT",
"DeHallucinationSwarm", "DeHallucinationSwarm",
"DeepResearchSwarm", "DeepResearchSwarm",
"HybridHierarchicalClusterSwarm", "HybridHierarchicalClusterSwarm",
"get_agents_info",
"get_swarms_info",
] ]

@ -38,10 +38,8 @@ from swarms.schemas.base_schemas import (
ChatCompletionResponseChoice, ChatCompletionResponseChoice,
ChatMessageResponse, ChatMessageResponse,
) )
from swarms.structs.concat import concat_strings from swarms.structs.agent_roles import agent_roles
from swarms.structs.conversation import Conversation from swarms.structs.conversation import Conversation
# from swarms.structs.multi_agent_exec import run_agents_concurrently
from swarms.structs.safe_loading import ( from swarms.structs.safe_loading import (
SafeLoaderUtils, SafeLoaderUtils,
SafeStateManager, SafeStateManager,
@ -49,12 +47,16 @@ from swarms.structs.safe_loading import (
from swarms.telemetry.main import log_agent_data from swarms.telemetry.main import log_agent_data
from swarms.tools.base_tool import BaseTool from swarms.tools.base_tool import BaseTool
from swarms.tools.tool_parse_exec import parse_and_execute_json from swarms.tools.tool_parse_exec import parse_and_execute_json
from swarms.utils.any_to_str import any_to_str
from swarms.utils.data_to_text import data_to_text from swarms.utils.data_to_text import data_to_text
from swarms.utils.file_processing import create_file_in_folder from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.formatter import formatter from swarms.utils.formatter import formatter
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.litellm_tokenizer import count_tokens from swarms.utils.litellm_tokenizer import count_tokens
from swarms.utils.pdf_to_text import pdf_to_text from swarms.utils.pdf_to_text import pdf_to_text
from swarms.structs.agent_roles import agent_roles from swarms.utils.str_to_dict import str_to_dict
# Utils # Utils
@ -470,12 +472,21 @@ class Agent:
self.no_print = no_print self.no_print = no_print
self.tools_list_dictionary = tools_list_dictionary self.tools_list_dictionary = tools_list_dictionary
if (
self.agent_name is not None
or self.agent_description is not None
):
prompt = f"Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {system_prompt}"
else:
prompt = system_prompt
# Initialize the short term memory # Initialize the short term memory
self.short_memory = Conversation( self.short_memory = Conversation(
system_prompt=system_prompt, system_prompt=prompt,
time_enabled=False, time_enabled=False,
user=user_name, user=user_name,
rules=rules, rules=rules,
token_count=False,
*args, *args,
**kwargs, **kwargs,
) )
@ -501,24 +512,10 @@ class Agent:
tool_system_prompt=tool_system_prompt, tool_system_prompt=tool_system_prompt,
) )
# The max_loops will be set dynamically if the dynamic_loop # Some common configuration settings
if self.dynamic_loops is True: threading.Thread(
logger.info("Dynamic loops enabled") target=self.setup_config, daemon=True
self.max_loops = "auto" ).start()
# If multimodal = yes then set the sop to the multimodal sop
if self.multi_modal is True:
self.sop = MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1
# If the preset stopping token is enabled then set the stopping token to the preset stopping token
if preset_stopping_token is not None:
self.stopping_token = "<DONE>"
# If the docs exist then ingest the docs
# if exists(self.docs):
# threading.Thread(
# target=self.ingest_docs, args=(self.docs)
# ).start()
# If docs folder exists then get the docs from docs folder # If docs folder exists then get the docs from docs folder
if exists(self.docs_folder): if exists(self.docs_folder):
@ -564,10 +561,6 @@ class Agent:
if exists(self.sop) or exists(self.sop_list): if exists(self.sop) or exists(self.sop_list):
threading.Thread(target=self.handle_sop_ops()).start() threading.Thread(target=self.handle_sop_ops()).start()
# If agent_ops is on => activate agentops
if agent_ops_on is True:
threading.Thread(target=self.activate_agentops()).start()
# Many steps # Many steps
self.agent_output = ManySteps( self.agent_output = ManySteps(
agent_id=agent_id, agent_id=agent_id,
@ -631,12 +624,27 @@ class Agent:
temperature=self.temperature, temperature=self.temperature,
max_tokens=self.max_tokens, max_tokens=self.max_tokens,
system_prompt=self.system_prompt, system_prompt=self.system_prompt,
stream=self.streaming_on,
) )
return llm return llm
except Exception as e: except Exception as e:
logger.error(f"Error in llm_handling: {e}") logger.error(f"Error in llm_handling: {e}")
return None return None
def setup_config(self):
# The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops is True:
logger.info("Dynamic loops enabled")
self.max_loops = "auto"
# If multimodal = yes then set the sop to the multimodal sop
if self.multi_modal is True:
self.sop = MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1
# If the preset stopping token is enabled then set the stopping token to the preset stopping token
if self.preset_stopping_token is not None:
self.stopping_token = "<DONE>"
def prepare_tools_list_dictionary(self): def prepare_tools_list_dictionary(self):
import json import json
@ -772,18 +780,6 @@ class Agent:
""", """,
) )
def loop_count_print(
self, loop_count: int, max_loops: int
) -> None:
"""loop_count_print summary
Args:
loop_count (_type_): _description_
max_loops (_type_): _description_
"""
logger.info(f"\nLoop {loop_count} of {max_loops}")
print("\n")
# Check parameters # Check parameters
def check_parameters(self): def check_parameters(self):
if self.llm is None: if self.llm is None:
@ -803,7 +799,7 @@ class Agent:
# Main function # Main function
def _run( def _run(
self, self,
task: Optional[str] = None, task: Optional[Union[str, Any]] = None,
img: Optional[str] = None, img: Optional[str] = None,
speech: Optional[str] = None, speech: Optional[str] = None,
video: Optional[str] = None, video: Optional[str] = None,
@ -834,8 +830,6 @@ class Agent:
try: try:
self.check_if_no_prompt_then_autogenerate(task) self.check_if_no_prompt_then_autogenerate(task)
self.agent_output.task = task
# Add task to memory # Add task to memory
self.short_memory.add(role=self.user_name, content=task) self.short_memory.add(role=self.user_name, content=task)
@ -845,17 +839,17 @@ class Agent:
# Set the loop count # Set the loop count
loop_count = 0 loop_count = 0
# Clear the short memory # Clear the short memory
response = None response = None
all_responses = []
# Query the long term memory first for the context # Query the long term memory first for the context
if self.long_term_memory is not None: if self.long_term_memory is not None:
self.memory_query(task) self.memory_query(task)
# Print the user's request # Autosave
if self.autosave: if self.autosave:
log_agent_data(self.to_dict())
self.save() self.save()
# Print the request # Print the request
@ -870,8 +864,11 @@ class Agent:
or loop_count < self.max_loops or loop_count < self.max_loops
): ):
loop_count += 1 loop_count += 1
self.loop_count_print(loop_count, self.max_loops)
print("\n") # self.short_memory.add(
# role=f"{self.agent_name}",
# content=f"Internal Reasoning Loop: {loop_count} of {self.max_loops}",
# )
# Dynamic temperature # Dynamic temperature
if self.dynamic_temperature_enabled is True: if self.dynamic_temperature_enabled is True:
@ -902,48 +899,24 @@ class Agent:
if img is None if img is None
else (task_prompt, img, *args) else (task_prompt, img, *args)
) )
# Call the LLM
response = self.call_llm( response = self.call_llm(
*response_args, **kwargs *response_args, **kwargs
) )
# Convert to a str if the response is not a str # Convert to a str if the response is not a str
response = self.llm_output_parser(response) response = self.parse_llm_output(response)
# if correct_answer is not None: self.short_memory.add(
# if correct_answer not in response: role=self.agent_name, content=response
# logger.info("Correct answer found in response") )
# # break
# Print # Print
if self.no_print is False: self.pretty_print(response, loop_count)
if self.streaming_on is True:
# self.stream_response(response) # Output Cleaner
formatter.print_panel_token_by_token( self.output_cleaner_op(response)
f"{self.agent_name}: {response}",
title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
)
else:
# logger.info(f"Response: {response}")
formatter.print_panel(
f"{self.agent_name}: {response}",
f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
)
# Check if response is a dictionary and has 'choices' key
if (
isinstance(response, dict)
and "choices" in response
):
response = response["choices"][0][
"message"
]["content"]
elif isinstance(response, str):
# If response is already a string, use it as is
pass
else:
raise ValueError(
f"Unexpected response format: {type(response)}"
)
# Check and execute tools # Check and execute tools
if self.tools is not None: if self.tools is not None:
@ -975,34 +948,7 @@ class Agent:
role=self.agent_name, content=out role=self.agent_name, content=out
) )
# Add the response to the memory self.sentiment_and_evaluator(response)
self.short_memory.add(
role=self.agent_name, content=response
)
# Add to all responses
all_responses.append(response)
# # TODO: Implement reliability check
if self.evaluator:
logger.info("Evaluating response...")
evaluated_response = self.evaluator(
response
)
print(
"Evaluated Response:"
f" {evaluated_response}"
)
self.short_memory.add(
role="Evaluator",
content=evaluated_response,
)
# Sentiment analysis
if self.sentiment_analyzer:
logger.info("Analyzing sentiment...")
self.sentiment_analysis_handler(response)
success = True # Mark as successful to exit the retry loop success = True # Mark as successful to exit the retry loop
@ -1059,7 +1005,7 @@ class Agent:
break break
self.short_memory.add( self.short_memory.add(
role=self.user_name, content=user_input role="User", content=user_input
) )
if self.loop_interval: if self.loop_interval:
@ -1074,90 +1020,14 @@ class Agent:
if self.autosave is True: if self.autosave is True:
self.save() self.save()
# Apply the cleaner function to the response
if self.output_cleaner is not None:
logger.info("Applying output cleaner to response.")
response = self.output_cleaner(response)
logger.info(
f"Response after output cleaner: {response}"
)
self.short_memory.add(
role="Output Cleaner",
content=response,
)
if self.agent_ops_on is True and is_last is True:
self.check_end_session_agentops()
# Merge all responses
all_responses = [
response
for response in all_responses
if response is not None
]
self.agent_output.steps = self.short_memory.to_dict()
self.agent_output.full_history = (
self.short_memory.get_str()
)
self.agent_output.total_tokens = count_tokens(
self.short_memory.get_str()
)
# # Handle artifacts
# if self.artifacts_on is True:
# self.handle_artifacts(
# concat_strings(all_responses),
# self.artifacts_output_path,
# self.artifacts_file_extension,
# )
log_agent_data(self.to_dict()) log_agent_data(self.to_dict())
if self.autosave is True: if self.autosave is True:
self.save() self.save()
# More flexible output types return history_output_formatter(
if ( self.short_memory, type=self.output_type
self.output_type == "string" )
or self.output_type == "str"
):
return concat_strings(all_responses)
elif self.output_type == "list":
return all_responses
elif (
self.output_type == "json"
or self.return_step_meta is True
):
return self.agent_output.model_dump_json(indent=4)
elif self.output_type == "csv":
return self.dict_to_csv(
self.agent_output.model_dump()
)
elif self.output_type == "dict":
return self.agent_output.model_dump()
elif self.output_type == "yaml":
return yaml.safe_dump(
self.agent_output.model_dump(), sort_keys=False
)
elif self.output_type == "memory-list":
return self.short_memory.return_messages_as_list()
elif self.output_type == "memory-dict":
return (
self.short_memory.return_messages_as_dictionary()
)
elif self.return_history is True:
history = self.short_memory.get_str()
formatter.print_panel(
history, title=f"{self.agent_name} History"
)
return history
else:
raise ValueError(
f"Invalid output type: {self.output_type}"
)
except Exception as error: except Exception as error:
self._handle_run_error(error) self._handle_run_error(error)
@ -1933,7 +1803,7 @@ class Agent:
"""Send a message to the agent""" """Send a message to the agent"""
try: try:
logger.info(f"Sending agent message: {message}") logger.info(f"Sending agent message: {message}")
message = f"{agent_name}: {message}" message = f"To: {agent_name}: {message}"
return self.run(message, *args, **kwargs) return self.run(message, *args, **kwargs)
except Exception as error: except Exception as error:
logger.info(f"Error sending agent message: {error}") logger.info(f"Error sending agent message: {error}")
@ -2010,20 +1880,6 @@ class Agent:
) )
raise error raise error
def check_end_session_agentops(self):
if self.agent_ops_on is True:
try:
from swarms.utils.agent_ops_check import (
end_session_agentops,
)
# Try ending the session
return end_session_agentops()
except ImportError:
logger.error(
"Could not import agentops, try installing agentops: $ pip3 install agentops"
)
def memory_query(self, task: str = None, *args, **kwargs) -> None: def memory_query(self, task: str = None, *args, **kwargs) -> None:
try: try:
# Query the long term memory # Query the long term memory
@ -2150,50 +2006,6 @@ class Agent:
return out return out
def activate_agentops(self):
if self.agent_ops_on is True:
try:
from swarms.utils.agent_ops_check import (
try_import_agentops,
)
# Try importing agent ops
logger.info(
"Agent Ops Initializing, ensure that you have the agentops API key and the pip package installed."
)
try_import_agentops()
self.agent_ops_agent_name = self.agent_name
logger.info("Agentops successfully activated!")
except ImportError:
logger.error(
"Could not import agentops, try installing agentops: $ pip3 install agentops"
)
def llm_output_parser(self, response: Any) -> str:
"""Parse the output from the LLM"""
try:
if isinstance(response, dict):
if "choices" in response:
return response["choices"][0]["message"][
"content"
]
else:
return json.dumps(
response
) # Convert dict to string
elif isinstance(response, str):
return response
else:
return str(
response
) # Convert any other type to string
except Exception as e:
logger.error(f"Error parsing LLM output: {e}")
return str(
response
) # Return string representation as fallback
def log_step_metadata( def log_step_metadata(
self, loop: int, task: str, response: str self, loop: int, task: str, response: str
) -> Step: ) -> Step:
@ -2494,7 +2306,7 @@ class Agent:
def run( def run(
self, self,
task: Optional[str] = None, task: Optional[Union[str, Any]] = None,
img: Optional[str] = None, img: Optional[str] = None,
device: Optional[str] = "cpu", # gpu device: Optional[str] = "cpu", # gpu
device_id: Optional[int] = 0, device_id: Optional[int] = 0,
@ -2531,6 +2343,9 @@ class Agent:
Exception: If any other error occurs during execution. Exception: If any other error occurs during execution.
""" """
if not isinstance(task, str):
task = any_to_str(task)
if scheduled_run_date: if scheduled_run_date:
while datetime.now() < scheduled_run_date: while datetime.now() < scheduled_run_date:
time.sleep( time.sleep(
@ -2539,13 +2354,18 @@ class Agent:
try: try:
# If cluster ops disabled, run directly # If cluster ops disabled, run directly
return self._run( output = self._run(
task=task, task=task,
img=img, img=img,
*args, *args,
**kwargs, **kwargs,
) )
if self.tools_list_dictionary is not None:
return str_to_dict(output)
else:
return output
except ValueError as e: except ValueError as e:
self._handle_run_error(e) self._handle_run_error(e)
@ -2635,7 +2455,7 @@ class Agent:
) )
return agent.run( return agent.run(
task=f"From {self.agent_name}: {output}", task=f"From {self.agent_name}: Message: {output}",
img=img, img=img,
*args, *args,
**kwargs, **kwargs,
@ -2651,10 +2471,27 @@ class Agent:
""" """
Talk to multiple agents. Talk to multiple agents.
""" """
outputs = [] # Use the existing executor from self.executor or create a new one if needed
for agent in agents: with ThreadPoolExecutor() as executor:
output = self.talk_to(agent, task, *args, **kwargs) # Create futures for each agent conversation
outputs.append(output) futures = [
executor.submit(
self.talk_to, agent, task, *args, **kwargs
)
for agent in agents
]
# Wait for all futures to complete and collect results
outputs = []
for future in futures:
try:
result = future.result()
outputs.append(result)
except Exception as e:
logger.error(f"Error in agent communication: {e}")
outputs.append(
None
) # or handle error case as needed
return outputs return outputs
@ -2664,9 +2501,88 @@ class Agent:
""" """
return self.role return self.role
# def __getstate__(self): def pretty_print(self, response: str, loop_count: int):
# state = self.__dict__.copy() if self.no_print is False:
# # Remove or replace unpicklable attributes. if self.streaming_on is True:
# if '_queue' in state: # self.stream_response(response)
# del state['_queue'] formatter.print_panel_token_by_token(
# return state f"{self.agent_name}: {response}",
title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
)
else:
# logger.info(f"Response: {response}")
formatter.print_panel(
f"{self.agent_name}: {response}",
f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
)
def parse_llm_output(self, response: Any) -> str:
"""Parse and standardize the output from the LLM.
Args:
response (Any): The response from the LLM in any format
Returns:
str: Standardized string output
Raises:
ValueError: If the response format is unexpected and can't be handled
"""
try:
# Handle dictionary responses
if isinstance(response, dict):
if "choices" in response:
return response["choices"][0]["message"][
"content"
]
return json.dumps(
response
) # Convert other dicts to string
# Handle string responses
elif isinstance(response, str):
return response
# Handle list responses (from check_llm_outputs)
elif isinstance(response, list):
return "\n".join(response)
# Handle any other type by converting to string
else:
return str(response)
except Exception as e:
logger.error(f"Error parsing LLM output: {e}")
raise ValueError(
f"Failed to parse LLM output: {type(response)}"
)
def sentiment_and_evaluator(self, response: str):
if self.evaluator:
logger.info("Evaluating response...")
evaluated_response = self.evaluator(response)
print("Evaluated Response:" f" {evaluated_response}")
self.short_memory.add(
role="Evaluator",
content=evaluated_response,
)
# Sentiment analysis
if self.sentiment_analyzer:
logger.info("Analyzing sentiment...")
self.sentiment_analysis_handler(response)
def output_cleaner_op(self, response: str):
# Apply the cleaner function to the response
if self.output_cleaner is not None:
logger.info("Applying output cleaner to response.")
response = self.output_cleaner(response)
logger.info(f"Response after output cleaner: {response}")
self.short_memory.add(
role="Output Cleaner",
content=response,
)

@ -89,7 +89,6 @@ class BaseSwarm(ABC):
stopping_function: Optional[Callable] = None, stopping_function: Optional[Callable] = None,
stopping_condition: Optional[str] = "stop", stopping_condition: Optional[str] = "stop",
stopping_condition_args: Optional[Dict] = None, stopping_condition_args: Optional[Dict] = None,
agentops_on: Optional[bool] = False,
speaker_selection_func: Optional[Callable] = None, speaker_selection_func: Optional[Callable] = None,
rules: Optional[str] = None, rules: Optional[str] = None,
collective_memory_system: Optional[Any] = False, collective_memory_system: Optional[Any] = False,
@ -112,7 +111,6 @@ class BaseSwarm(ABC):
self.stopping_function = stopping_function self.stopping_function = stopping_function
self.stopping_condition = stopping_condition self.stopping_condition = stopping_condition
self.stopping_condition_args = stopping_condition_args self.stopping_condition_args = stopping_condition_args
self.agentops_on = agentops_on
self.speaker_selection_func = speaker_selection_func self.speaker_selection_func = speaker_selection_func
self.rules = rules self.rules = rules
self.collective_memory_system = collective_memory_system self.collective_memory_system = collective_memory_system
@ -167,11 +165,6 @@ class BaseSwarm(ABC):
self.stopping_condition_args = stopping_condition_args self.stopping_condition_args = stopping_condition_args
self.stopping_condition = stopping_condition self.stopping_condition = stopping_condition
# If agentops is enabled, try to import agentops
if agentops_on is True:
for agent in self.agents:
agent.agent_ops_on = True
# Handle speaker selection function # Handle speaker selection function
if speaker_selection_func is not None: if speaker_selection_func is not None:
if not callable(speaker_selection_func): if not callable(speaker_selection_func):

@ -262,7 +262,7 @@ class ConcurrentWorkflow(BaseSwarm):
) -> AgentOutputSchema: ) -> AgentOutputSchema:
start_time = datetime.now() start_time = datetime.now()
try: try:
output = agent.run(task=task, img=img) output = agent.run(task=task)
self.conversation.add( self.conversation.add(
agent.agent_name, agent.agent_name,

@ -119,7 +119,7 @@ class Conversation(BaseStructure):
content (Union[str, dict, list]): The content of the message to be added. content (Union[str, dict, list]): The content of the message to be added.
""" """
now = datetime.datetime.now() now = datetime.datetime.now()
timestamp = now.strftime("%Y-%m-%d %H:%M:%S") now.strftime("%Y-%m-%d %H:%M:%S")
# Base message with role # Base message with role
message = { message = {
@ -129,8 +129,12 @@ class Conversation(BaseStructure):
# Handle different content types # Handle different content types
if isinstance(content, dict) or isinstance(content, list): if isinstance(content, dict) or isinstance(content, list):
message["content"] = content message["content"] = content
elif self.time_enabled:
message["content"] = (
f"Time: {now.strftime('%Y-%m-%d %H:%M:%S')} \n {content}"
)
else: else:
message["content"] = f"Time: {timestamp} \n {content}" message["content"] = content
# Add the message to history immediately without waiting for token count # Add the message to history immediately without waiting for token count
self.conversation_history.append(message) self.conversation_history.append(message)
@ -510,6 +514,16 @@ class Conversation(BaseStructure):
""" """
return f"{self.conversation_history[-1]['role']}: {self.conversation_history[-1]['content']}" return f"{self.conversation_history[-1]['role']}: {self.conversation_history[-1]['content']}"
def get_final_message_content(self):
"""Return the content of the final message from the conversation history.
Returns:
str: The content of the final message.
"""
output = self.conversation_history[-1]["content"]
# print(output)
return output
# # Example usage # # Example usage
# # conversation = Conversation() # # conversation = Conversation()
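# Minimal sketch of the behavior changed above (assumption: Conversation can be
# constructed standalone): with time_enabled=False the raw content is stored as-is,
# while time_enabled=True prefixes it with a "Time: ..." stamp.
from swarms.structs.conversation import Conversation

convo = Conversation(time_enabled=False)
convo.add(role="User", content="Hello")
print(convo.get_final_message_content())  # -> "Hello" (no timestamp prefix)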

@ -4,7 +4,8 @@ from loguru import logger
from swarms.structs.agent import Agent from swarms.structs.agent import Agent
# Prompt templates for different agent roles # Prompt templates for different agent roles
GENERATOR_PROMPT = """You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics. GENERATOR_PROMPT = """
You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics.
Your responsibilities: Your responsibilities:
1. Provide accurate information based on your training data 1. Provide accurate information based on your training data
@ -22,7 +23,8 @@ When responding to queries:
Remember, it's better to acknowledge ignorance than to provide incorrect information. Remember, it's better to acknowledge ignorance than to provide incorrect information.
""" """
CRITIC_PROMPT = """You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text. CRITIC_PROMPT = """
You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text.
Your responsibilities: Your responsibilities:
1. Carefully analyze the provided text for factual errors 1. Carefully analyze the provided text for factual errors
@ -47,7 +49,8 @@ Focus particularly on:
Be thorough and specific in your critique. Provide actionable feedback for improvement. Be thorough and specific in your critique. Provide actionable feedback for improvement.
""" """
REFINER_PROMPT = """You are a refinement specialist tasked with improving text based on critical feedback. REFINER_PROMPT = """
You are a refinement specialist tasked with improving text based on critical feedback.
Your responsibilities: Your responsibilities:
1. Carefully review the original text and the critical feedback 1. Carefully review the original text and the critical feedback
@ -67,7 +70,8 @@ Guidelines for refinement:
The refined text should be helpful and informative while being scrupulously accurate. The refined text should be helpful and informative while being scrupulously accurate.
""" """
VALIDATOR_PROMPT = """You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs. VALIDATOR_PROMPT = """
You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs.
Your responsibilities: Your responsibilities:
1. Verify that all critical issues from previous feedback have been properly addressed 1. Verify that all critical issues from previous feedback have been properly addressed

@ -0,0 +1,226 @@
import json
import random
from swarms.structs.agent import Agent
from typing import List
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.any_to_str import any_to_str
tools = [
{
"type": "function",
"function": {
"name": "select_agent",
"description": "Analyzes the input task and selects the most appropriate agent configuration, outputting both the agent name and the formatted response.",
"parameters": {
"type": "object",
"properties": {
"respond_or_no_respond": {
"type": "boolean",
"description": "Whether the agent should respond to the response or not.",
},
"reasoning": {
"type": "string",
"description": "The reasoning behind the selection of the agent and response.",
},
"agent_name": {
"type": "string",
"description": "The name of the selected agent that is most appropriate for handling the given task.",
},
"response": {
"type": "string",
"description": "A clear and structured description of the response for the next agent.",
},
},
"required": [
"reasoning",
"agent_name",
"response",
"respond_or_no_respond",
],
},
},
},
]
class DynamicConversationalSwarm:
def __init__(
self,
name: str = "Dynamic Conversational Swarm",
description: str = "A swarm that uses a dynamic conversational model to solve complex tasks.",
agents: List[Agent] = [],
max_loops: int = 1,
output_type: str = "list",
*args,
**kwargs,
):
self.name = name
self.description = description
self.agents = agents
self.max_loops = max_loops
self.output_type = output_type
self.conversation = Conversation()
# Agents in the chat
agents_in_chat = self.get_agents_info()
self.conversation.add(
role="Conversation Log", content=agents_in_chat
)
self.inject_tools()
# Inject tools into the agents
def inject_tools(self):
for agent in self.agents:
agent.tools_list_dictionary = tools
def parse_json_into_dict(self, json_str: str) -> dict:
try:
return json.loads(json_str)
except json.JSONDecodeError:
raise ValueError("Invalid JSON string")
def find_agent_by_name(self, agent_name: str) -> Agent:
for agent in self.agents:
if agent.name == agent_name:
return agent
raise ValueError(f"Agent with name {agent_name} not found")
def run_agent(self, agent_name: str, task: str) -> str:
agent = self.find_agent_by_name(agent_name)
return agent.run(task)
def fetch_random_agent_name(self) -> str:
return random.choice(self.agents).agent_name
def run(self, task: str) -> str:
"""
Run the dynamic conversational swarm for a specified number of loops.
Each agent has access to the full conversation history.
Args:
task (str): The initial task/prompt to process
Returns:
str: The final response after all loops are complete
"""
self.conversation.add(
role=f"{self.fetch_random_agent_name()}", content=task
)
# for loop in range(self.max_loops):
# # Add loop marker to conversation for clarity
# self.conversation.add(
# role="System",
# content=f"=== Starting Loop {loop + 1}/{self.max_loops} ==="
# )
# # First agent interaction
# current_agent = self.randomly_select_agent()
# response = self.run_agent(current_agent.name, self.conversation.get_str())
# self.conversation.add(role=current_agent.name, content=any_to_str(response))
# try:
# # Parse response and get next agent
# response_dict = self.parse_json_into_dict(response)
# # Check if we should continue or end the loop
# if not response_dict.get("respond_or_no_respond", True):
# break
# # Get the task description for the next agent
# next_task = response_dict.get("task_description", self.conversation.get_str())
# # Run the next agent with the specific task description
# next_agent = self.find_agent_by_name(response_dict["agent_name"])
# next_response = self.run_agent(next_agent.name, next_task)
# # Add both the task description and response to the conversation
# self.conversation.add(
# role="System",
# content=f"Response from {response_dict['agent_name']}: {next_task}"
# )
# self.conversation.add(role=next_agent.name, content=any_to_str(next_response))
# except (ValueError, KeyError) as e:
# self.conversation.add(
# role="System",
# content=f"Error in loop {loop + 1}: {str(e)}"
# )
# break
# Run first agent
current_agent = self.randomly_select_agent()
response = self.run_agent(
current_agent.agent_name, self.conversation.get_str()
)
self.conversation.add(
role=current_agent.agent_name,
content=any_to_str(response),
)
# Convert to json
response_dict = self.parse_json_into_dict(response)
# Fetch task
response_two = response_dict["response"]
agent_name = response_dict["agent_name"]
print(f"Response from {agent_name}: {response_two}")
# Run next agent
next_response = self.run_agent(
agent_name, self.conversation.get_str()
)
self.conversation.add(
role=agent_name, content=any_to_str(next_response)
)
# # Get the next agent
# response_three = self.parse_json_into_dict(next_response)
# agent_name_three = response_three["agent_name"]
# respone_four = response_three["response"]
# print(f"Response from {agent_name_three}: {respone_four}")
# # Run the next agent
# next_response = self.run_agent(agent_name_three, self.conversation.get_str())
# self.conversation.add(role=agent_name_three, content=any_to_str(next_response))
# Format and return the final conversation history
return history_output_formatter(
self.conversation, type=self.output_type
)
def randomly_select_agent(self) -> Agent:
return random.choice(self.agents)
def get_agents_info(self) -> str:
"""
Fetches and formats information about all available agents in the system.
Returns:
str: A formatted string containing names and descriptions of all agents.
"""
if not self.agents:
return "No agents currently available in the system."
agents_info = [
"Agents In the System:",
"",
] # Empty string for line spacing
for idx, agent in enumerate(self.agents, 1):
agents_info.extend(
[
f"[Agent {idx}]",
f"Name: {agent.name}",
f"Description: {agent.description}",
"", # Empty string for line spacing between agents
]
)
return "\n".join(agents_info).strip()

@ -1,4 +1,5 @@
import concurrent.futures import concurrent.futures
import random
from datetime import datetime from datetime import datetime
from typing import Callable, List from typing import Callable, List
@ -7,6 +8,13 @@ from pydantic import BaseModel, Field
from swarms.structs.agent import Agent from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
+from swarms.structs.multi_agent_exec import get_agents_info
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
+from swarms.prompts.multi_agent_collab_prompt import (
+    MULTI_AGENT_COLLAB_PROMPT_TWO,
+)

 class AgentResponse(BaseModel):

@ -230,15 +238,23 @@ class GroupChat:
         speaker_fn: SpeakerFunction = round_robin,
         max_loops: int = 1,
         rules: str = "",
+        output_type: str = "string",
     ):
         self.name = name
         self.description = description
         self.agents = agents
         self.speaker_fn = speaker_fn
         self.max_loops = max_loops
-        self.conversation = Conversation(time_enabled=False)
+        self.output_type = output_type
         self.rules = rules
+        self.conversation = Conversation(
+            time_enabled=False, rules=rules
+        )
+
+        agent_context = f"\n Group Chat Name: {self.name}\nGroup Chat Description: {self.description}\n Agents in your Group Chat: {get_agents_info(self.agents)}"
+        self.conversation.add(role="System", content=agent_context)

         self.reliability_check()

     def reliability_check(self):

@ -248,23 +264,24 @@ class GroupChat:
         Raises:
             ValueError: If any required components are missing or invalid
         """
         if len(self.agents) < 2:
             raise ValueError(
                 "At least two agents are required for a group chat"
             )
-        if self.speaker_fn is None:
-            raise ValueError("No speaker function provided")
         if self.max_loops <= 0:
             raise ValueError("Max loops must be greater than 0")
         for agent in self.agents:
-            if not isinstance(agent, Agent):
-                raise ValueError(
-                    f"Invalid agent type: {type(agent)}. Must be Agent instance"
-                )
+            agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT_TWO

     def run(self, task: str, img: str = None, *args, **kwargs) -> str:
         """
-        Executes a conversation between agents about the given task.
+        Executes a dynamic conversation between agents about the given task.
+        Agents are selected randomly to speak, creating a more natural flow
+        with varying conversation lengths.

         Args:
             task (str): The task or topic for agents to discuss

@ -279,106 +296,85 @@ class GroupChat:
             ValueError: If task is empty or invalid
             Exception: If any error occurs during conversation
         """
         if not task or not isinstance(task, str):
             raise ValueError("Task must be a non-empty string")

-        # Initialize conversation with context
-        agent_context = f"Group Chat Name: {self.name}\nGroup Chat Description: {self.description}\nRules: {self.rules}\n Other agents: {', '.join([a.agent_name for a in self.agents])}"
-        self.conversation.add(role="system", content=agent_context)
         self.conversation.add(role="User", content=task)
-        print(
-            f"....... conversation history: \n {self.conversation.return_history_as_string()}"
-        )

         try:
             turn = 0
-            consecutive_silent_turns = 0
-            max_silent_turns = 2  # End conversation if no one speaks for this many turns
-
-            while turn < self.max_loops:
-                context = self.conversation.return_messages_as_list()
-
-                # Get agents who should speak this turn
-                speaking_agents = [
-                    agent
-                    for agent in self.agents
-                    if self.speaker_fn(context, agent)
-                ]
-
-                if not speaking_agents:
-                    consecutive_silent_turns += 1
-                    if consecutive_silent_turns >= max_silent_turns:
-                        logger.debug(
-                            "Multiple silent turns, ending conversation"
-                        )
-                        break
-                    continue
-
-                consecutive_silent_turns = (
-                    0  # Reset counter when agents speak
-                )
-
-                # Process each speaking agent
-                for agent in speaking_agents:
-                    try:
-                        # Build context-aware prompt
-                        prompt = (
-                            f"You're {agent.agent_name} participating in a group chat.\n"
-                            f"Chat Purpose: {self.description}\n"
-                            f"Current Discussion: {task}\n"
-                            f"Chat History:\n{self.conversation.return_history_as_string()}\n"
-                            f"As {agent.agent_name}, please provide your response:"
-                        )
-
-                        print(
-                            f"....... what the agent sees prompt: \n {prompt}"
-                        )
-
-                        message = agent.run(
-                            task=prompt,
-                            img=img,
-                            *args,
-                            **kwargs,
-                        )
-
-                        if not message or message.isspace():
-                            logger.warning(
-                                f"Empty response from {agent.agent_name}, skipping"
-                            )
-                            continue
-
-                        self.conversation.add(
-                            role=agent.agent_name, content=message
-                        )
-
-                        logger.info(
-                            f"Turn {turn}, {agent.agent_name} responded"
-                        )
-
-                    except Exception as e:
-                        logger.error(
-                            f"Error from {agent.agent_name}: {e}"
-                        )
-                        # Continue with other agents instead of crashing
-                        continue
-
-                turn += 1
-
-                # Check if conversation has reached a natural conclusion
-                last_messages = (
-                    context[-3:] if len(context) >= 3 else context
-                )
-                if all(
-                    "conclusion" in msg.lower()
-                    for msg in last_messages
-                ):
-                    logger.debug(
-                        "Natural conversation conclusion detected"
-                    )
-                    break
-
-            return self.conversation.return_history_as_string()
+            # Determine a random number of conversation turns
+            target_turns = random.randint(1, 4)
+            logger.debug(
+                f"Planning for approximately {target_turns} conversation turns"
+            )
+
+            # Keep track of which agent spoke last to create realistic exchanges
+            last_speaker = None
+
+            while turn < target_turns:
+                # Select an agent to speak (different from the last speaker if possible)
+                available_agents = self.agents.copy()
+
+                if last_speaker and len(available_agents) > 1:
+                    available_agents.remove(last_speaker)
+
+                current_speaker = random.choice(available_agents)
+
+                try:
+                    # Build complete context with conversation history
+                    conversation_history = (
+                        self.conversation.return_history_as_string()
+                    )
+
+                    # Prepare a prompt that explicitly encourages responding to others
+                    if last_speaker:
+                        prompt = f"The previous message was from {last_speaker.agent_name}. As {current_speaker.agent_name}, please respond to what they and others have said about: {task}"
+                    else:
+                        prompt = f"As {current_speaker.agent_name}, please start the discussion about: {task}"
+
+                    # Get the agent's response with full context awareness
+                    message = current_speaker.run(
+                        task=f"{conversation_history} {prompt}",
+                    )
+
+                    # Only add meaningful responses
+                    if message and not message.isspace():
+                        self.conversation.add(
+                            role=current_speaker.agent_name,
+                            content=message,
+                        )
+
+                        logger.info(
+                            f"Turn {turn}, {current_speaker.agent_name} responded"
+                        )
+
+                    # Update the last speaker
+                    last_speaker = current_speaker
+                    turn += 1
+
+                    # Occasionally end early to create natural variation
+                    if (
+                        turn > 3 and random.random() < 0.15
+                    ):  # 15% chance to end after at least 3 turns
+                        logger.debug(
+                            "Random early conversation end"
+                        )
+                        break
+
+                except Exception as e:
+                    logger.error(
+                        f"Error from {current_speaker.agent_name}: {e}"
+                    )
+                    # Skip this agent and continue conversation
+                    continue
+
+            return history_output_formatter(
+                self.conversation, self.output_type
+            )

         except Exception as e:
             logger.error(f"Error in chat: {e}")
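To see the new `output_type` plumbing end to end, here is a minimal usage sketch; the agent names, prompts, and model are placeholders, and it assumes `GroupChat` stays importable from `swarms.structs.groupchat`.

```python
from swarms import Agent
from swarms.structs.groupchat import GroupChat  # import path is an assumption

analyst = Agent(
    agent_name="Market-Analyst",
    system_prompt="You analyze market trends.",
    model_name="gpt-4o-mini",
    max_loops=1,
)
strategist = Agent(
    agent_name="Strategist",
    system_prompt="You turn analysis into strategy.",
    model_name="gpt-4o-mini",
    max_loops=1,
)

chat = GroupChat(
    name="Research Chat",
    description="Collaborative market research discussion",
    agents=[analyst, strategist],
    max_loops=1,
    output_type="string",  # routed through history_output_formatter at the end of run()
)

# run() now plans a random number of turns and alternates speakers,
# then returns the history in the requested output_type.
print(chat.run("Discuss the outlook for consumer GPUs in 2025."))
```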

@ -2,6 +2,7 @@ import os
 from typing import List
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
+from swarms.structs.multi_agent_exec import get_swarms_info
 from swarms.structs.swarm_router import SwarmRouter
 from swarms.utils.history_output_formatter import (
     history_output_formatter,

@ -120,12 +121,27 @@ class HybridHierarchicalClusterSwarm:
         self.router_agent = Agent(
             agent_name="Router Agent",
             agent_description="A router agent that routes tasks to the appropriate swarms.",
-            system_prompt=f"{router_system_prompt}\n\n{self.get_swarms_info()}",
+            system_prompt=f"{router_system_prompt}\n\n{get_swarms_info()}",
             tools_list_dictionary=tools,
             model_name=router_agent_model_name,
             max_loops=1,
+            output_type="final",
         )

+    def convert_str_to_dict(self, response: str):
+        # Handle response whether it's a string or dictionary
+        if isinstance(response, str):
+            try:
+                import json
+
+                response = json.loads(response)
+            except json.JSONDecodeError:
+                raise ValueError(
+                    "Invalid JSON response from router agent"
+                )
+
+        return response
+
     def run(self, task: str, *args, **kwargs):
         """
         Runs the routing process for a given task.

@ -146,23 +162,19 @@ class HybridHierarchicalClusterSwarm:
         response = self.router_agent.run(task=task)

-        # Handle response whether it's a string or dictionary
         if isinstance(response, str):
-            try:
-                import json
-
-                response = json.loads(response)
-            except json.JSONDecodeError:
-                raise ValueError(
-                    "Invalid JSON response from router agent"
-                )
+            response = self.convert_str_to_dict(response)
+        else:
+            pass

         swarm_name = response.get("swarm_name")
         task_description = response.get("task_description")

         if not swarm_name or not task_description:
             raise ValueError(
-                "Invalid response from router agent: missing swarm_name or task_description."
+                "Invalid response from router agent: both 'swarm_name' and 'task_description' must be present. "
+                f"Received: swarm_name={swarm_name}, task_description={task_description}. "
+                f"Please check the response format from the model: {self.router_agent.model_name}."
             )

         self.route_task(swarm_name, task_description)

@ -242,32 +254,3 @@ class HybridHierarchicalClusterSwarm:
         results.append(f"Error processing task: {str(e)}")

         return results
-
-    def get_swarms_info(self) -> str:
-        """
-        Fetches and formats information about all available swarms in the system.
-
-        Returns:
-            str: A formatted string containing names and descriptions of all swarms.
-        """
-        if not self.swarms:
-            return "No swarms currently available in the system."
-
-        swarm_info = [
-            "Available Swarms:",
-            "",
-        ]  # Empty string for line spacing
-
-        for idx, swarm in enumerate(self.swarms, 1):
-            swarm_info.extend(
-                [
-                    f"[Swarm {idx}]",
-                    f"Name: {swarm.name}",
-                    f"Description: {swarm.description}",
-                    f"Length of Agents: {len(swarm.agents)}",
-                    f"Swarm Type: {swarm.swarm_type}",
-                    "",  # Empty string for line spacing between swarms
-                ]
-            )
-
-        return "\n".join(swarm_info).strip()
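For reference, a small sketch of the response shape `convert_str_to_dict` expects from the router agent; only the `swarm_name` and `task_description` keys come from the code above, the values are illustrative.

```python
import json

# The router agent is expected to emit a JSON object with these two keys.
raw_response = '{"swarm_name": "Research-Swarm", "task_description": "Summarize Q3 filings"}'

parsed = json.loads(raw_response)  # what convert_str_to_dict does for string responses
assert parsed["swarm_name"] and parsed["task_description"]
```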

@ -5,7 +5,7 @@ from concurrent.futures import (
ThreadPoolExecutor, ThreadPoolExecutor,
) )
from dataclasses import dataclass from dataclasses import dataclass
-from typing import Any, List
+from typing import Any, Callable, List, Union
import psutil import psutil
@ -415,141 +415,65 @@ def run_agents_with_tasks_concurrently(
) )
# from joblib import Parallel, delayed def get_swarms_info(swarms: List[Callable]) -> str:
"""
Fetches and formats information about all available swarms in the system.
# def run_agents_joblib(
# agents: List[Any], Returns:
# tasks: List[str] = [], str: A formatted string containing names and descriptions of all swarms.
# img: List[str] = None, """
# max_workers: int = None, if not swarms:
# max_loops: int = 1, return "No swarms currently available in the system."
# prefer: str = "threads",
# ) -> List[Any]: swarm_info = [
# """ "Available Swarms:",
# Executes a list of agents with their corresponding tasks concurrently using joblib. "",
] # Empty string for line spacing
# Each agent is expected to have a .run() method that accepts at least:
# - task: A string indicating the task to execute. for idx, swarm in enumerate(swarms, 1):
# - img: (Optional) A string representing image input. swarm_info.extend(
[
# Args: f"[Swarm {idx}]",
# agents (List[Any]): A list of agent instances. f"Name: {swarm.name}",
# tasks (List[str], optional): A list of task strings. If provided, each agent gets a task. f"Description: {swarm.description}",
# If fewer tasks than agents, the first task is reused. f"Length of Agents: {len(swarm.agents)}",
# img (List[str], optional): A list of image strings. If provided, each agent gets an image. f"Swarm Type: {swarm.swarm_type}",
# If fewer images than agents, the first image is reused. "", # Empty string for line spacing between swarms
# max_workers (int, optional): The maximum number of processes to use. ]
# Defaults to all available CPU cores. )
# max_loops (int, optional): Number of times to execute the whole batch.
return "\n".join(swarm_info).strip()
# Returns:
# List[Any]: The list of results returned by each agents run() method.
# """ def get_agents_info(
# max_workers = max_workers or os.cpu_count() agents: List[Union[Agent, Callable]], team_name: str = None
# results = [] ) -> str:
"""
# for _ in range(max_loops): Fetches and formats information about all available agents in the system.
# results.extend(
# Parallel(n_jobs=max_workers, prefer=prefer)( Returns:
# delayed(lambda a, t, i: a.run(task=t, img=i))( str: A formatted string containing names and descriptions of all swarms.
# agent, """
# ( if not agents:
# tasks[idx] return "No agents currently available in the system."
# if tasks and idx < len(tasks)
# else (tasks[0] if tasks else "") agent_info = [
# ), f"Available Agents for Team: {team_name}",
# ( "",
# img[idx] ] # Empty string for line spacing
# if img and idx < len(img)
# else (img[0] if img else None) for idx, agent in enumerate(agents, 1):
# ), agent_info.extend(
# ) [
# for idx, agent in enumerate(agents) "\n",
# ) f"[Agent {idx}]",
# ) f"Name: {agent.agent_name}",
f"Description: {agent.agent_description}",
# return results f"Role: {agent.role}",
f"Model: {agent.model_name}",
f"Max Loops: {agent.max_loops}",
# # Example usage: "\n",
# if __name__ == '__main__': ]
# # Dummy Agent class for demonstration. )
# class Agent:
# def __init__(self, agent_name, max_loops, model_name): return "\n".join(agent_info).strip()
# self.agent_name = agent_name
# self.max_loops = max_loops
# self.model_name = model_name
# def run(self, task: str, img: str = None) -> str:
# img_info = f" with image '{img}'" if img else ""
# return (f"{self.agent_name} using model '{self.model_name}' processed task: '{task}'{img_info}")
# # Create a few Agent instances.
# agents = [
# Agent(
# agent_name=f"Financial-Analysis-Agent_parallel_swarm{i}",
# max_loops=1,
# model_name="gpt-4o-mini",
# )
# for i in range(3)
# ]
# task = "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
# outputs = run_agents_process_pool(agents, tasks=[task])
# for i, output in enumerate(outputs):
# print(f"Output from agent {i+1}:\n{output}")
# # Example usage:
# if __name__ == '__main__':
# # A sample agent class with a run method.
# class SampleAgent:
# def __init__(self, name):
# self.name = name
# def run(self, task, device, device_id, no_clusterops):
# # Simulate some processing.
# return (f"Agent {self.name} processed task '{task}' on {device} "
# f"(device_id={device_id}), no_clusterops={no_clusterops}")
# # Create a list of sample agents.
# agents = [SampleAgent(f"Agent_{i}") for i in range(5)]
# # Define tasks; if fewer tasks than agents, the first task will be reused.
# tasks = ["task1", "task2", "task3"]
# outputs = run_agents_with_tasks_concurrently(
# agents=agents,
# tasks=tasks,
# max_workers=4,
# device="cpu",
# device_id=1,
# all_cores=True,
# no_clusterops=False
# )
# for output in outputs:
# print(output)
# # Example usage:
# if __name__ == "__main__":
# # Initialize your agents (for example, 3 agents)
# agents = [
# Agent(
# agent_name=f"Financial-Analysis-Agent_parallel_swarm{i}",
# max_loops=1,
# model_name="gpt-4o-mini",
# )
# for i in range(3)
# ]
# # Generate a list of tasks.
# tasks = [
# "How can I establish a ROTH IRA to buy stocks and get a tax break?",
# "What are the criteria for establishing a ROTH IRA?",
# "What are the tax benefits of a ROTH IRA?",
# "How to buy stocks using a ROTH IRA?",
# "What are the limitations of a ROTH IRA?",
# ]
# outputs = run_agents_joblib(agents, tasks)
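A quick sketch of the relocated helpers; it assumes the stock `Agent` exposes the attributes `get_agents_info` reads (`agent_description`, `role`, `model_name`, `max_loops`), and the agent names are placeholders.

```python
from swarms import Agent
from swarms.structs.multi_agent_exec import get_agents_info

agents = [
    Agent(
        agent_name=f"Worker-{i}",
        agent_description="Handles one slice of the workload.",
        model_name="gpt-4o-mini",
        max_loops=1,
    )
    for i in range(2)
]

# Renders the roster that swarms now inject into their conversations.
print(get_agents_info(agents, team_name="Demo Team"))
```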

@ -2,20 +2,21 @@ import asyncio
import json import json
import uuid import uuid
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel, Field
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.structs.agent import Agent from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.base_swarm import BaseSwarm from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.any_to_str import any_to_str
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.loguru_logger import initialize_logger from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.main import log_agent_data from swarms.telemetry.main import log_agent_data
from swarms.structs.conversation import Conversation from swarms.structs.conversation import Conversation
from swarms.structs.output_types import OutputType from swarms.structs.output_types import OutputType
from swarms.structs.multi_agent_exec import get_agents_info
logger = initialize_logger(log_folder="rearrange") logger = initialize_logger(log_folder="rearrange")
@ -24,35 +25,6 @@ def swarm_id():
return uuid.uuid4().hex return uuid.uuid4().hex
class AgentRearrangeInput(BaseModel):
swarm_id: Optional[str] = None
name: Optional[str] = None
description: Optional[str] = None
flow: Optional[str] = None
max_loops: Optional[int] = None
time: str = Field(
default_factory=lambda: datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
),
description="The time the agent was created.",
)
output_type: OutputType = Field(default="final")
class AgentRearrangeOutput(BaseModel):
output_id: str = Field(
default=swarm_id(), description="Output-UUID"
)
input: Optional[AgentRearrangeInput] = None
outputs: Optional[List[ManySteps]] = None
time: str = Field(
default_factory=lambda: datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
),
description="The time the agent was created.",
)
class AgentRearrange(BaseSwarm): class AgentRearrange(BaseSwarm):
""" """
A class representing a swarm of agents for rearranging tasks. A class representing a swarm of agents for rearranging tasks.
@ -117,6 +89,7 @@ class AgentRearrange(BaseSwarm):
autosave: bool = True, autosave: bool = True,
return_entire_history: bool = False, return_entire_history: bool = False,
rules: str = None, rules: str = None,
team_awareness: bool = False,
*args, *args,
**kwargs, **kwargs,
): ):
@ -146,56 +119,18 @@ class AgentRearrange(BaseSwarm):
self.no_use_clusterops = no_use_clusterops self.no_use_clusterops = no_use_clusterops
self.autosave = autosave self.autosave = autosave
self.return_entire_history = return_entire_history self.return_entire_history = return_entire_history
self.output_schema = AgentRearrangeOutput(
input=AgentRearrangeInput(
swarm_id=id,
name=name,
description=description,
flow=flow,
max_loops=max_loops,
),
outputs=[],
)
self.conversation = Conversation() self.conversation = Conversation(
time_enabled=False, token_count=False
)
if rules: if rules:
self.conversation.add("user", rules) self.conversation.add("User", rules)
def showcase_agents(self):
# Get formatted agent info once
agents_available = showcase_available_agents(
name=self.name,
description=self.description,
agents=self.agents,
format="Table",
)
return agents_available if team_awareness is True:
agents_info = get_agents_info(self.agents, self.name)
def rearrange_prompt_prep(self) -> str: self.conversation.add("Your Swarm", agents_info)
"""Prepares a formatted prompt describing the swarm configuration.
Returns:
str: A formatted string containing the swarm's name, description,
flow pattern, and participating agents.
"""
agents_available = self.showcase_agents()
prompt = f"""
===== Swarm Configuration =====
Name: {self.name}
Description: {self.description}
===== Execution Flow =====
{self.flow}
===== Participating Agents =====
{agents_available}
===========================
"""
return prompt
def set_custom_flow(self, flow: str): def set_custom_flow(self, flow: str):
self.flow = flow self.flow = flow
@ -313,7 +248,7 @@ class AgentRearrange(BaseSwarm):
Exception: For any other errors during execution Exception: For any other errors during execution
""" """
try: try:
self.conversation.add("user", task) self.conversation.add("User", task)
if not self.validate_flow(): if not self.validate_flow():
logger.error("Flow validation failed") logger.error("Flow validation failed")
@ -321,15 +256,13 @@ class AgentRearrange(BaseSwarm):
tasks = self.flow.split("->") tasks = self.flow.split("->")
current_task = task current_task = task
all_responses = []
response_dict = {} response_dict = {}
previous_agent = None
logger.info( logger.info(
f"Starting task execution with {len(tasks)} steps" f"Starting task execution with {len(tasks)} steps"
) )
# Handle custom tasks # # Handle custom tasks
if custom_tasks is not None: if custom_tasks is not None:
logger.info("Processing custom tasks") logger.info("Processing custom tasks")
c_agent_name, c_task = next( c_agent_name, c_task = next(
@ -354,150 +287,65 @@ class AgentRearrange(BaseSwarm):
name.strip() for name in task.split(",") name.strip() for name in task.split(",")
] ]
# Prepare prompt with previous agent info
prompt_prefix = ""
if previous_agent and task_idx > 0:
prompt_prefix = f"Previous agent {previous_agent} output: {current_task}\n"
elif task_idx == 0:
prompt_prefix = "Initial task: "
if len(agent_names) > 1: if len(agent_names) > 1:
# Parallel processing # Parallel processing
logger.info( logger.info(
f"Running agents in parallel: {agent_names}" f"Running agents in parallel: {agent_names}"
) )
results = []
for agent_name in agent_names: for agent_name in agent_names:
if agent_name == "H":
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
prompt_prefix
+ str(current_task)
)
)
else:
current_task = input(
prompt_prefix
+ "Enter your response: "
)
results.append(current_task)
response_dict[agent_name] = (
current_task
)
else:
agent = self.agents[agent_name]
task_with_context = (
prompt_prefix + str(current_task)
if current_task
else prompt_prefix
)
result = agent.run(
task=task_with_context,
img=img,
is_last=is_last,
*args,
**kwargs,
)
result = str(result)
self.conversation.add(
agent.agent_name, result
)
results.append(result)
response_dict[agent_name] = result
self.output_schema.outputs.append(
agent.agent_output
)
logger.debug(
f"Agent {agent_name} output: {result}"
)
current_task = "; ".join(results)
all_responses.extend(results)
previous_agent = ",".join(agent_names)
else:
# Sequential processing
logger.info(
f"Running agent sequentially: {agent_names[0]}"
)
agent_name = agent_names[0]
if agent_name == "H":
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
prompt_prefix
+ str(current_task)
)
)
else:
current_task = input(
prompt_prefix
+ "Enter the next task: "
)
response_dict[agent_name] = current_task
else:
agent = self.agents[agent_name] agent = self.agents[agent_name]
task_with_context = ( result = agent.run(
prompt_prefix + str(current_task) task=self.conversation.get_str(),
if current_task
else prompt_prefix
)
current_task = agent.run(
task=task_with_context,
img=img, img=img,
is_last=is_last, is_last=is_last,
*args, *args,
**kwargs, **kwargs,
) )
current_task = str(current_task) result = any_to_str(result)
self.conversation.add( self.conversation.add(
agent.agent_name, current_task agent.agent_name, result
)
response_dict[agent_name] = current_task
self.output_schema.outputs.append(
agent.agent_output
) )
response_dict[agent_name] = result
logger.debug( logger.debug(
f"Agent {agent_name} output: {current_task}" f"Agent {agent_name} output: {result}"
) )
all_responses.append( ",".join(agent_names)
f"Agent Name: {agent.agent_name} \n Output: {current_task} "
else:
# Sequential processing
logger.info(
f"Running agent sequentially: {agent_names[0]}"
) )
previous_agent = agent_name agent_name = agent_names[0]
loop_count += 1 agent = self.agents[agent_name]
logger.info("Task execution completed") current_task = agent.run(
task=self.conversation.get_str(),
img=img,
is_last=is_last,
*args,
**kwargs,
)
current_task = any_to_str(current_task)
if self.return_json: self.conversation.add(
return self.output_schema.model_dump_json(indent=4) agent.agent_name, current_task
)
if self.return_entire_history: response_dict[agent_name] = current_task
return self.output_schema.model_dump_json(indent=4)
# Handle different output types loop_count += 1
if self.output_type == "all":
output = " ".join(all_responses) logger.info("Task execution completed")
elif self.output_type == "list":
output = all_responses
elif self.output_type == "dict":
output = (
self.conversation.return_messages_as_dictionary()
)
else: # "final"
output = current_task
return output return history_output_formatter(
self.conversation, self.output_type
)
except Exception as e: except Exception as e:
self._catch_error(e) self._catch_error(e)
@ -542,13 +390,19 @@ class AgentRearrange(BaseSwarm):
The result from executing the task through the cluster operations wrapper. The result from executing the task through the cluster operations wrapper.
""" """
try: try:
return self._run( log_agent_data(self.to_dict())
out = self._run(
task=task, task=task,
img=img, img=img,
*args, *args,
**kwargs, **kwargs,
) )
log_agent_data(self.to_dict())
return out
except Exception as e: except Exception as e:
self._catch_error(e) self._catch_error(e)
@ -780,6 +634,8 @@ class AgentRearrange(BaseSwarm):
def rearrange( def rearrange(
name: str = None,
description: str = None,
agents: List[Agent] = None, agents: List[Agent] = None,
flow: str = None, flow: str = None,
task: str = None, task: str = None,
@ -807,6 +663,11 @@ def rearrange(
rearrange(agents, flow, task) rearrange(agents, flow, task)
""" """
agent_system = AgentRearrange( agent_system = AgentRearrange(
agents=agents, flow=flow, *args, **kwargs name=name,
description=description,
agents=agents,
flow=flow,
*args,
**kwargs,
) )
return agent_system.run(task, img=img, *args, **kwargs) return agent_system.run(task=task, img=img)
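A short sketch of the updated `rearrange` helper, which now forwards `name` and `description` to `AgentRearrange`; the agent names, flow, and task are illustrative.

```python
from swarms import Agent
from swarms.structs.rearrange import rearrange

researcher = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
writer = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)

# Sequential flow: Researcher first, then Writer.
result = rearrange(
    name="Report-Pipeline",
    description="Research a topic, then write it up.",
    agents=[researcher, writer],
    flow="Researcher -> Writer",
    task="Draft a one-paragraph summary of solid-state batteries.",
)
print(result)
```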

@ -1,8 +1,9 @@
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Optional from typing import List, Optional
from swarms.structs.agent import Agent from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.output_types import OutputType from swarms.structs.output_types import OutputType
from concurrent.futures import ThreadPoolExecutor, as_completed from swarms.structs.rearrange import AgentRearrange
from swarms.utils.loguru_logger import initialize_logger from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="sequential_workflow") logger = initialize_logger(log_folder="sequential_workflow")
@ -10,18 +11,20 @@ logger = initialize_logger(log_folder="sequential_workflow")
class SequentialWorkflow: class SequentialWorkflow:
""" """
Initializes a SequentialWorkflow object, which orchestrates the execution of a sequence of agents. A class that orchestrates the execution of a sequence of agents in a defined workflow.
Args: Args:
name (str, optional): The name of the workflow. Defaults to "SequentialWorkflow". name (str, optional): The name of the workflow. Defaults to "SequentialWorkflow".
description (str, optional): A description of the workflow. Defaults to "Sequential Workflow, where agents are executed in a sequence." description (str, optional): A description of the workflow. Defaults to "Sequential Workflow, where agents are executed in a sequence."
agents (List[Agent], optional): The list of agents in the workflow. Defaults to None. agents (List[Agent], optional): A list of agents that will be part of the workflow. Defaults to an empty list.
max_loops (int, optional): The maximum number of loops to execute the workflow. Defaults to 1. max_loops (int, optional): The maximum number of times to execute the workflow. Defaults to 1.
*args: Variable length argument list. output_type (OutputType, optional): The format of the output from the workflow. Defaults to "dict".
**kwargs: Arbitrary keyword arguments. shared_memory_system (callable, optional): A callable for managing shared memory between agents. Defaults to None.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Raises: Raises:
ValueError: If agents list is None or empty, or if max_loops is 0 ValueError: If the agents list is None or empty, or if max_loops is set to 0.
""" """
def __init__( def __init__(
@ -31,9 +34,7 @@ class SequentialWorkflow:
agents: List[Agent] = [], agents: List[Agent] = [],
max_loops: int = 1, max_loops: int = 1,
output_type: OutputType = "dict", output_type: OutputType = "dict",
return_json: bool = False,
shared_memory_system: callable = None, shared_memory_system: callable = None,
return_entire_history: bool = False,
*args, *args,
**kwargs, **kwargs,
): ):
@ -42,9 +43,7 @@ class SequentialWorkflow:
self.agents = agents self.agents = agents
self.max_loops = max_loops self.max_loops = max_loops
self.output_type = output_type self.output_type = output_type
self.return_json = return_json
self.shared_memory_system = shared_memory_system self.shared_memory_system = shared_memory_system
self.return_entire_history = return_entire_history
self.reliability_check() self.reliability_check()
self.flow = self.sequential_flow() self.flow = self.sequential_flow()
@ -56,7 +55,6 @@ class SequentialWorkflow:
flow=self.flow, flow=self.flow,
max_loops=max_loops, max_loops=max_loops,
output_type=output_type, output_type=output_type,
return_json=return_json,
shared_memory_system=shared_memory_system, shared_memory_system=shared_memory_system,
*args, *args,
**kwargs, **kwargs,
@ -101,7 +99,7 @@ class SequentialWorkflow:
if self.max_loops == 0: if self.max_loops == 0:
raise ValueError("max_loops cannot be 0") raise ValueError("max_loops cannot be 0")
logger.info("Checks completed your swarm is ready.") logger.info("Checks completed; your swarm is ready.")
def run( def run(
self, self,
@ -114,25 +112,25 @@ class SequentialWorkflow:
no_use_clusterops: bool = True, no_use_clusterops: bool = True,
*args, *args,
**kwargs, **kwargs,
) -> str: ):
""" """
Executes a task through the agents in the dynamically constructed flow. Executes a specified task through the agents in the dynamically constructed flow.
Args: Args:
task (str): The task for the agents to execute. task (str): The task for the agents to execute.
device (str): The device to use for the agents to execute. img (Optional[str]): An optional image input for the agents.
all_cores (bool): Whether to use all cores. device (str): The device to use for the agents to execute. Defaults to "cpu".
all_gpus (bool): Whether to use all gpus. all_cores (bool): Whether to utilize all CPU cores. Defaults to False.
device_id (int): The device id to use for the agents to execute. all_gpus (bool): Whether to utilize all available GPUs. Defaults to False.
no_use_clusterops (bool): Whether to use clusterops. device_id (int): The specific device ID to use for execution. Defaults to 0.
no_use_clusterops (bool): Whether to avoid using cluster operations. Defaults to True.
Returns: Returns:
str: The final result after processing through all agents. str: The final result after processing through all agents.
Raises: Raises:
ValueError: If task is None or empty ValueError: If the task is None or empty.
Exception: If any error occurs during task execution Exception: If any error occurs during task execution.
""" """
try: try:
@ -143,17 +141,6 @@ class SequentialWorkflow:
**kwargs, **kwargs,
) )
if self.output_type == "dict":
result = (
self.agent_rearrange.conversation.return_messages_as_dictionary()
)
elif self.output_type == "list":
result = (
self.agent_rearrange.conversation.return_messages_as_list()
)
elif self.output_type == "str" or self.return_json:
result = self.agent_rearrange.conversation.get_str()
return result return result
except Exception as e: except Exception as e:
logger.error( logger.error(
@ -161,7 +148,7 @@ class SequentialWorkflow:
) )
raise e raise e
def __call__(self, task: str, *args, **kwargs) -> str: def __call__(self, task: str, *args, **kwargs):
return self.run(task, *args, **kwargs) return self.run(task, *args, **kwargs)
def run_batched(self, tasks: List[str]) -> List[str]: def run_batched(self, tasks: List[str]) -> List[str]:
@ -169,14 +156,14 @@ class SequentialWorkflow:
Executes a batch of tasks through the agents in the dynamically constructed flow. Executes a batch of tasks through the agents in the dynamically constructed flow.
Args: Args:
tasks (List[str]): The tasks for the agents to execute. tasks (List[str]): A list of tasks for the agents to execute.
Returns: Returns:
List[str]: The final results after processing through all agents. List[str]: A list of final results after processing through all agents.
Raises: Raises:
ValueError: If tasks is None or empty ValueError: If tasks is None or empty.
Exception: If any error occurs during task execution Exception: If any error occurs during task execution.
""" """
if not tasks or not all( if not tasks or not all(
isinstance(task, str) for task in tasks isinstance(task, str) for task in tasks
@ -195,7 +182,7 @@ class SequentialWorkflow:
async def run_async(self, task: str) -> str: async def run_async(self, task: str) -> str:
""" """
Executes the task through the agents in the dynamically constructed flow asynchronously. Executes the specified task through the agents in the dynamically constructed flow asynchronously.
Args: Args:
task (str): The task for the agents to execute. task (str): The task for the agents to execute.
@ -204,8 +191,8 @@ class SequentialWorkflow:
str: The final result after processing through all agents. str: The final result after processing through all agents.
Raises: Raises:
ValueError: If task is None or empty ValueError: If task is None or empty.
Exception: If any error occurs during task execution Exception: If any error occurs during task execution.
""" """
if not task or not isinstance(task, str): if not task or not isinstance(task, str):
raise ValueError("Task must be a non-empty string") raise ValueError("Task must be a non-empty string")
@ -223,14 +210,14 @@ class SequentialWorkflow:
Executes a batch of tasks through the agents in the dynamically constructed flow concurrently. Executes a batch of tasks through the agents in the dynamically constructed flow concurrently.
Args: Args:
tasks (List[str]): The tasks for the agents to execute. tasks (List[str]): A list of tasks for the agents to execute.
Returns: Returns:
List[str]: The final results after processing through all agents. List[str]: A list of final results after processing through all agents.
Raises: Raises:
ValueError: If tasks is None or empty ValueError: If tasks is None or empty.
Exception: If any error occurs during task execution Exception: If any error occurs during task execution.
""" """
if not tasks or not all( if not tasks or not all(
isinstance(task, str) for task in tasks isinstance(task, str) for task in tasks
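A minimal sketch of the slimmed-down `SequentialWorkflow`; it assumes the class is still re-exported at the package top level, and the agents and task are placeholders.

```python
from swarms import Agent, SequentialWorkflow

workflow = SequentialWorkflow(
    name="Blog-Pipeline",
    description="Outline first, then draft.",
    agents=[
        Agent(agent_name="Outliner", model_name="gpt-4o-mini", max_loops=1),
        Agent(agent_name="Drafter", model_name="gpt-4o-mini", max_loops=1),
    ],
    max_loops=1,
    output_type="dict",  # forwarded straight to AgentRearrange / history_output_formatter
)

print(workflow.run("Write a short post about vector databases."))
```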

@ -193,7 +193,8 @@ class SwarmRouter:
) )
         # Handle Automated Prompt Engineering
-        self.activate_ape()
+        if self.auto_generate_prompts is True:
+            self.activate_ape()

         # Handle shared memory
         if self.shared_memory_system is not None:
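With this guard, APE only runs when requested; a small sketch, assuming `auto_generate_prompts` is exposed as a `SwarmRouter` constructor argument.

```python
from swarms import Agent
from swarms.structs.swarm_router import SwarmRouter

router = SwarmRouter(
    name="demo-router",
    agents=[
        Agent(agent_name="Helper", model_name="gpt-4o-mini", max_loops=1)
    ],
    swarm_type="SequentialWorkflow",
    auto_generate_prompts=True,  # activate_ape() now only fires when this flag is True
)
```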

@ -1,59 +1,40 @@
import os import os
import logging
import warnings import warnings
import concurrent.futures from pathlib import Path
from dotenv import load_dotenv
from loguru import logger
from swarms.utils.disable_logging import disable_logging from swarms.utils.disable_logging import disable_logging
from loguru import logger
def bootup(): def bootup():
"""Initialize swarms environment and configuration """Super-fast initialization of swarms environment"""
Handles environment setup, logging configuration, telemetry,
and workspace initialization.
"""
try: try:
# Load environment variables # Cache env vars
load_dotenv() verbose = os.getenv("SWARMS_VERBOSE_GLOBAL", "False").lower()
workspace_path = Path.cwd() / "agent_workspace"
# Configure logging
if (
os.getenv("SWARMS_VERBOSE_GLOBAL", "False").lower()
== "false"
):
logger.disable("")
logging.disable(logging.CRITICAL)
# Configure logging early
if verbose == "false":
logger.disable("CRITICAL")
else: else:
logger.enable("") logger.enable("")
# Silent wandb # Silence wandb
os.environ["WANDB_SILENT"] = "true" os.environ["WANDB_SILENT"] = "true"
# Configure workspace # Setup workspace dir only if needed
workspace_dir = os.path.join(os.getcwd(), "agent_workspace") if not workspace_path.exists():
os.makedirs(workspace_dir, exist_ok=True) workspace_path.mkdir(parents=True, exist_ok=True)
os.environ["WORKSPACE_DIR"] = workspace_dir os.environ["WORKSPACE_DIR"] = str(workspace_path)
# Suppress warnings # Suppress deprecation warnings
warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=DeprecationWarning)
# Run telemetry functions concurrently # Run lightweight telemetry
try: try:
with concurrent.futures.ThreadPoolExecutor( disable_logging()
max_workers=2
) as executor:
future_disable_logging = executor.submit(
disable_logging
)
# Wait for completion and check for exceptions
future_disable_logging.result()
except Exception as e: except Exception as e:
logger.error(f"Error running telemetry functions: {e}") logger.error(f"Telemetry error: {e}")
except Exception as e: except Exception as e:
logger.error(f"Error during bootup: {str(e)}") logger.error(f"Bootup error: {str(e)}")
raise raise
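A tiny sketch of the expected behavior after bootup; the import path is an assumption, since the file header is not shown in this hunk.

```python
import os
from swarms.telemetry.bootup import bootup  # module path is an assumption

bootup()  # idempotent: only creates ./agent_workspace when it is missing
print(os.environ["WORKSPACE_DIR"])
```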

@ -1,11 +1,14 @@
import datetime
import hashlib import hashlib
import os
import platform import platform
import socket import socket
import subprocess import subprocess
import threading
import uuid import uuid
from typing import Dict from typing import Dict
import aiohttp
import httpx
import pkg_resources import pkg_resources
import psutil import psutil
import requests import requests
@ -263,9 +266,8 @@ def capture_system_data() -> Dict[str, str]:
return {} return {}
def log_agent_data(data_dict: dict) -> dict | None: def _log_agent_data(data_dict: dict) -> dict | None:
""" """
Silently logs agent data to the Swarms database with retry logic.
Args: Args:
data_dict (dict): The dictionary containing the agent data to be logged. data_dict (dict): The dictionary containing the agent data to be logged.
@ -274,29 +276,67 @@ def log_agent_data(data_dict: dict) -> dict | None:
dict | None: The JSON response from the server if successful, otherwise None. dict | None: The JSON response from the server if successful, otherwise None.
""" """
if not data_dict: if not data_dict:
return None # Immediately exit if the input is empty return None
url = "https://swarms.world/api/get-agents/log-agents" url = "https://swarms.world/api/get-agents/log-agents"
headers = { headers = {
"Content-Type": "application/json", "Content-Type": "application/json",
"Authorization": os.getenv("SWARMS_API_KEY"), "Authorization": "sk-xxx", # replace with actual
}
payload = {
"data": data_dict,
"system_data": get_user_device_data(),
"timestamp": datetime.datetime.now(datetime.UTC).isoformat(),
} }
try: try:
response = requests.post( with httpx.Client(http2=True, timeout=3.0) as client:
url, json=data_dict, headers=headers, timeout=10 response = client.post(url, json=payload, headers=headers)
) if response.status_code == 200 and response.content:
if ( return response.json()
response.ok and response.text.strip() except Exception:
): # Check if response is valid and non-empty pass
return (
response.json()
) # Parse and return the JSON response def log_agent_data(data_dict: dict) -> None:
except ( """Runs log_agent_data in a separate thread (detached from main thread)."""
requests.exceptions.RequestException, threading.Thread(
requests.exceptions.JSONDecodeError, target=_log_agent_data, args=(data_dict,), daemon=True
): ).start()
return None # Return None if anything goes wrong
async def async_log_agent_data(data_dict: dict) -> dict | None:
# print(log_agent_data(get_user_device_data())) """
Args:
data_dict (dict): The dictionary containing the agent data to be logged.
Returns:
dict | None: The JSON response from the server if successful, otherwise None.
"""
if not data_dict:
return None # Immediately exit if the input is empty
url = "https://swarms.world/api/get-agents/log-agents"
headers = {
"Content-Type": "application/json",
"Authorization": "sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24",
}
data_input = {
"data": data_dict,
"system_data": get_user_device_data(),
"timestamp": datetime.datetime.now(datetime.UTC).isoformat(),
}
async with aiohttp.ClientSession() as session:
try:
async with session.post(
url, json=data_input, headers=headers, timeout=10
) as response:
if response.ok and await response.text():
out = await response.json()
return out
except Exception:
pass
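A usage sketch of the now fire-and-forget logger; the payload keys are illustrative.

```python
from swarms.telemetry.main import log_agent_data

# Returns immediately; a daemon thread posts the payload in the background,
# so a failed request can no longer block or crash the caller.
log_agent_data({"agent_name": "Demo-Agent", "status": "completed"})
```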

@ -32,26 +32,26 @@ class ToolSet(BaseModel):
tools: List[Tool] tools: List[Tool]
model = ToolSet( # model = ToolSet(
tools=[ # tools=[
Tool( # Tool(
type="function", # type="function",
function=FunctionDefinition( # function=FunctionDefinition(
name="test", # name="test",
description="test", # description="test",
parameters=ParameterSchema( # parameters=ParameterSchema(
type="object", # type="object",
properties={ # properties={
"weather_tool": PropertySchema( # "weather_tool": PropertySchema(
type="string", # type="string",
description="Get the weather in a given location", # description="Get the weather in a given location",
) # )
}, # },
required=["weather_tool"], # required=["weather_tool"],
), # ),
), # ),
), # ),
] # ]
) # )
print(model.model_dump_json(indent=4)) # print(model.model_dump_json(indent=4))

@ -1,26 +0,0 @@
from swarms.utils.loguru_logger import logger
import os
def try_import_agentops(*args, **kwargs):
try:
logger.info("Trying to import agentops")
import agentops
agentops.init(os.getenv("AGENTOPS_API_KEY"), *args, **kwargs)
return "agentops imported successfully."
except ImportError:
logger.error("Could not import agentops")
def end_session_agentops():
try:
logger.info("Trying to end session")
import agentops
agentops.end_session("Success")
return "Session ended successfully."
except ImportError:
logger.error("Could not import agentops")
return "Could not end session."

@ -146,5 +146,5 @@ def auto_check_and_download_package(
return success return success
-if __name__ == "__main__":
-    print(auto_check_and_download_package("torch"))
+# if __name__ == "__main__":
+#     print(auto_check_and_download_package("torch"))

@ -5,20 +5,6 @@ import warnings
from threading import Thread from threading import Thread
def disable_langchain():
"""
Disables the LangChain deprecation warning.
"""
from langchain_core._api.deprecation import (
LangChainDeprecationWarning,
)
# Ignore LangChainDeprecationWarning
warnings.filterwarnings(
"ignore", category=LangChainDeprecationWarning
)
def disable_logging(): def disable_logging():
""" """
Disables logging for specific modules and sets up file and stream handlers. Disables logging for specific modules and sets up file and stream handlers.
@ -47,7 +33,6 @@ def disable_logging():
"numexpr", "numexpr",
"git", "git",
"wandb.docker.auth", "wandb.docker.auth",
"langchain",
"distutils", "distutils",
"urllib3", "urllib3",
"elasticsearch", "elasticsearch",
@ -80,8 +65,6 @@ def disable_logging():
stream_handler.setLevel(logging.ERROR) stream_handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(stream_handler) logging.getLogger().addHandler(stream_handler)
disable_langchain()
def set_logger_level(logger_name: str) -> None: def set_logger_level(logger_name: str) -> None:
""" """

@ -1,3 +1,4 @@
+import yaml

 from swarms.structs.conversation import Conversation

@ -6,13 +7,17 @@ def history_output_formatter(
 ):
     if type == "list":
         return conversation.return_messages_as_list()
-    elif type == "dict":
+    elif type == "dict" or type == "dictionary":
         return conversation.to_dict()
     elif type == "string" or type == "str":
         return conversation.get_str()
-    elif type == "final":
-        return conversation.get_final_message()
+    elif type == "final" or type == "last":
+        return conversation.get_final_message_content()
     elif type == "json":
         return conversation.to_json()
+    elif type == "all":
+        return conversation.get_str()
+    elif type == "yaml":
+        return yaml.safe_dump(conversation.to_dict(), sort_keys=False)
     else:
         raise ValueError(f"Invalid type: {type}")
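A small sketch exercising the new `yaml` and `final`/`last` branches; the conversation content is illustrative, and the positional argument order follows the call sites above.

```python
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import history_output_formatter

conversation = Conversation(time_enabled=False)
conversation.add("User", "What is 2 + 2?")
conversation.add("Calculator-Agent", "4")

print(history_output_formatter(conversation, "yaml"))   # YAML dump of the dict form
print(history_output_formatter(conversation, "final"))  # content of the last message only
```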

@ -1,3 +1,6 @@
import base64
import requests
import asyncio import asyncio
from typing import List from typing import List
@ -24,6 +27,37 @@ except ImportError:
litellm.ssl_verify = False litellm.ssl_verify = False
def get_audio_base64(audio_source: str) -> str:
"""
Convert audio from a given source to a base64 encoded string.
This function handles both URLs and local file paths. If the audio source is a URL, it fetches the audio data
from the internet. If it is a local file path, it reads the audio data from the specified file.
Args:
audio_source (str): The source of the audio, which can be a URL or a local file path.
Returns:
str: A base64 encoded string representation of the audio data.
Raises:
requests.HTTPError: If the HTTP request to fetch audio data fails.
FileNotFoundError: If the local audio file does not exist.
"""
# Handle URL
if audio_source.startswith(("http://", "https://")):
response = requests.get(audio_source)
response.raise_for_status()
audio_data = response.content
# Handle local file
else:
with open(audio_source, "rb") as file:
audio_data = file.read()
encoded_string = base64.b64encode(audio_data).decode("utf-8")
return encoded_string
class LiteLLM: class LiteLLM:
""" """
This class represents a LiteLLM. This class represents a LiteLLM.
@ -42,6 +76,7 @@ class LiteLLM:
tools_list_dictionary: List[dict] = None, tools_list_dictionary: List[dict] = None,
tool_choice: str = "auto", tool_choice: str = "auto",
parallel_tool_calls: bool = False, parallel_tool_calls: bool = False,
audio: str = None,
*args, *args,
**kwargs, **kwargs,
): ):
@ -65,6 +100,7 @@ class LiteLLM:
self.tools_list_dictionary = tools_list_dictionary self.tools_list_dictionary = tools_list_dictionary
self.tool_choice = tool_choice self.tool_choice = tool_choice
self.parallel_tool_calls = parallel_tool_calls self.parallel_tool_calls = parallel_tool_calls
self.modalities = ["text"]
def _prepare_messages(self, task: str) -> list: def _prepare_messages(self, task: str) -> list:
""" """
@ -87,7 +123,83 @@ class LiteLLM:
return messages return messages
def run(self, task: str, *args, **kwargs): def audio_processing(self, task: str, audio: str):
"""
Process the audio for the given task.
Args:
task (str): The task to be processed.
audio (str): The path or identifier for the audio file.
"""
self.modalities.append("audio")
encoded_string = get_audio_base64(audio)
# Append messages
self.messages.append(
{
"role": "user",
"content": [
{"type": "text", "text": task},
{
"type": "input_audio",
"input_audio": {
"data": encoded_string,
"format": "wav",
},
},
],
}
)
def vision_processing(self, task: str, image: str):
"""
Process the image for the given task.
"""
self.modalities.append("vision")
# Append messages
self.messages.append(
{
"role": "user",
"content": [
{"type": "text", "text": task},
{
"type": "image_url",
"image_url": {
"url": image,
# "detail": "high"
# "format": "image",
},
},
],
}
)
def handle_modalities(
self, task: str, audio: str = None, img: str = None
):
"""
Handle the modalities for the given task.
"""
if audio is not None:
self.audio_processing(task=task, audio=audio)
if img is not None:
self.vision_processing(task=task, image=img)
if audio is not None and img is not None:
self.audio_processing(task=task, audio=audio)
self.vision_processing(task=task, image=img)
def run(
self,
task: str,
audio: str = None,
img: str = None,
*args,
**kwargs,
):
""" """
Run the LLM model for the given task. Run the LLM model for the given task.
@ -103,6 +215,8 @@ class LiteLLM:
messages = self._prepare_messages(task) messages = self._prepare_messages(task)
self.handle_modalities(task=task, audio=audio, img=img)
if self.tools_list_dictionary is not None: if self.tools_list_dictionary is not None:
response = completion( response = completion(
model=self.model_name, model=self.model_name,
@ -111,6 +225,7 @@ class LiteLLM:
temperature=self.temperature, temperature=self.temperature,
max_tokens=self.max_tokens, max_tokens=self.max_tokens,
tools=self.tools_list_dictionary, tools=self.tools_list_dictionary,
modalities=self.modalities,
tool_choice=self.tool_choice, tool_choice=self.tool_choice,
parallel_tool_calls=self.parallel_tool_calls, parallel_tool_calls=self.parallel_tool_calls,
*args, *args,
@ -130,6 +245,7 @@ class LiteLLM:
stream=self.stream, stream=self.stream,
temperature=self.temperature, temperature=self.temperature,
max_tokens=self.max_tokens, max_tokens=self.max_tokens,
modalities=self.modalities,
*args, *args,
**kwargs, **kwargs,
) )
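A usage sketch of the new multimodal path; the wrapper's module path and the audio-capable model name are assumptions, and `sample.wav` is a placeholder file.

```python
from swarms.utils.litellm_wrapper import LiteLLM  # module path is an assumption

llm = LiteLLM(model_name="gpt-4o-audio-preview")  # any litellm model that accepts audio input

# The wrapper base64-encodes the file (or URL) via get_audio_base64, appends the
# "audio" modality, and passes modalities=... through to litellm.completion().
out = llm.run(
    task="Transcribe and summarize this clip.",
    audio="sample.wav",  # placeholder path
)
print(out)
```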

@ -1,92 +0,0 @@
import subprocess
from typing import Any, Dict, List
from swarms.utils.loguru_logger import initialize_logger
from pydantic import BaseModel
from swarms.structs.agent import Agent
logger = initialize_logger(log_folder="pandas_utils")
def display_agents_info(agents: List[Agent]) -> None:
"""
Displays information about all agents in a list using a DataFrame.
:param agents: List of Agent instances.
"""
# Extracting relevant information from each agent
try:
import pandas as pd
except ImportError:
logger.error("Failed to import pandas")
subprocess.run(["pip", "install", "pandas"])
import pandas as pd
agent_data = []
for agent in agents:
try:
agent_info = {
"ID": agent.id,
"Name": agent.agent_name,
"Description": agent.description,
"max_loops": agent.max_loops,
# "Docs": agent.docs,
"System Prompt": agent.system_prompt,
"LLM Model": agent.llm.model_name, # type: ignore
}
agent_data.append(agent_info)
except AttributeError as e:
logger.error(
f"Failed to extract information from agent {agent}: {e}"
)
continue
# Creating a DataFrame to display the data
try:
df = pd.DataFrame(agent_data)
except Exception as e:
logger.error(f"Failed to create DataFrame: {e}")
return
# Displaying the DataFrame
try:
print(df)
except Exception as e:
logger.error(f"Failed to print DataFrame: {e}")
def dict_to_dataframe(data: Dict[str, Any]):
"""
Converts a dictionary into a pandas DataFrame.
:param data: Dictionary to convert.
:return: A pandas DataFrame representation of the dictionary.
"""
try:
import pandas as pd
except ImportError:
logger.error("Failed to import pandas")
subprocess.run(["pip", "install", "pandas"])
import pandas as pd
# Convert dictionary to DataFrame
df = pd.json_normalize(data)
return df
def pydantic_model_to_dataframe(model: BaseModel) -> any:
"""
Converts a Pydantic Base Model into a pandas DataFrame.
:param model: Pydantic Base Model to convert.
:return: A pandas DataFrame representation of the Pydantic model.
"""
# Convert Pydantic model to dictionary
model_dict = model.dict()
# Convert dictionary to DataFrame
df = dict_to_dataframe(model_dict)
return df

@ -0,0 +1,43 @@
import asyncio
from browser_use import Agent
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from swarms import ConcurrentWorkflow
load_dotenv()
class BrowserAgent:
def __init__(self, agent_name: str = "BrowserAgent"):
self.agent_name = agent_name
async def browser_agent_test(self, task: str):
agent = Agent(
task=task,
llm=ChatOpenAI(model="gpt-4o"),
)
result = await agent.run()
return result
def run(self, task: str):
return asyncio.run(self.browser_agent_test(task))
swarm = ConcurrentWorkflow(
agents=[BrowserAgent() for _ in range(3)],
)
swarm.run(
"""
Go to pump.fun.
2. Make an account: use email: "test@test.com" and password: "test1234"
3. Make a coin called and give it a cool description and etc. Fill in the form
4. Sit back and watch the coin grow in value.
"""
)