pull/622/head
Your Name 2 months ago
parent d73f1a68c4
commit 4b19e40c7b

@ -0,0 +1,126 @@
import os
import json
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller
from dotenv import load_dotenv
from typing import Any, List
load_dotenv()
class Flow(BaseModel):
    # Structured output schema for a single multi-agent execution flow.
    # NOTE: the Field descriptions below double as prompt instructions for
    # OpenAIFunctionCaller, so they are intentionally verbose and must not
    # be shortened casually — the model reads them to shape its output.
    # (No class docstring on purpose: pydantic would surface it as the
    # schema description sent to the model.)

    # Short, lowercase, dash-separated identifier for the flow.
    id: str = Field(
        description="A unique identifier for the flow. This should be a short, descriptive name that captures the main purpose of the flow. Use - to separate words and make it lowercase."
    )
    # High-level strategy, milestones, and success criteria.
    plan: str = Field(
        description="The comprehensive plan detailing how the flow will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
    )
    # Risk analysis: failure modes plus mitigation / recovery measures.
    failures_prediction: str = Field(
        description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the flow. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
    )
    # Justification for the chosen design over alternatives.
    rationale: str = Field(
        description="The detailed reasoning and justification for why this specific flow design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
    )
    # Agent ordering DSL: "->" = sequential dependency, "," = parallel.
    flow: str = Field(
        description="The precise execution flow defining how agents interact and coordinate. Use -> to indicate sequential processing where one agent must complete before the next begins (e.g. agent1 -> agent2 -> agent3). Use , to indicate parallel execution where multiple agents can run simultaneously (e.g. agent1 -> agent2, agent3, agent4). The flow should clearly show the dependencies and parallelization opportunities between agents. You must only use the agent names provided in the task description do not make up new agent names and do not use any other formatting."
    )
class AgentRearrangeBuilder(BaseModel):
    # Structured output schema for a swarm composed of several Flow teams.
    # As with Flow, the Field descriptions are part of the prompt contract
    # with OpenAIFunctionCaller — keep them verbatim.
    # (No class docstring: pydantic would emit it as the schema description.)

    # Short human-readable swarm name.
    name: str = Field(
        description="The name of the swarm. This should be a short, descriptive name that captures the main purpose of the flow."
    )
    # One-line summary of the swarm's purpose.
    description: str = Field(
        description="A brief description of the swarm. This should be a concise summary of the main purpose of the swarm."
    )
    # One Flow per team/sub-problem.
    flows: List[Flow] = Field(
        description="A list of flows that are optimal for the given task. Each flow should be a detailed plan, failure prediction, rationale, and execution flow."
    )
    # Inter-team ordering DSL, same "->" / "," semantics as Flow.flow but
    # over team ids instead of agent names.
    swarm_flow: str = Field(
        description="The flow defining how each team should communicate and coordinate with eachother.Use -> to indicate sequential processing where one id must complete before the next begins (e.g. team1 -> team2 -> team3). Use , to indicate parallel execution where multiple teams can run simultaneously (e.g. team1 -> team2, team3, team4). The flow should clearly show the dependencies and parallelization opportunities between teams. You must only use the team names provided in the id do not make up new team names and do not use any other formatting."
    )
def setup_model(base_model: type[BaseModel] = Flow) -> OpenAIFunctionCaller:
    """Build an OpenAIFunctionCaller that emits structured ``base_model`` output.

    Args:
        base_model: The pydantic model *class* the caller must return.
            Defaults to ``Flow``; pass ``AgentRearrangeBuilder`` for
            multi-team swarm generation.
            (Fix: the original annotated this as ``BaseModel`` — an
            instance type — although the default ``Flow`` is a class.)

    Returns:
        A configured ``OpenAIFunctionCaller`` reading the API key from the
        ``OPENAI_API_KEY`` environment variable (loaded via dotenv at import).
    """
    model = OpenAIFunctionCaller(
        system_prompt="""You are an expert flow architect specializing in designing multi-agent workflows. Your role is to analyze tasks and create optimal execution flows that coordinate multiple AI agents effectively.
When given a task, you will:
1. Develop a comprehensive plan breaking down the task into logical steps
2. Carefully consider potential failure modes and build in robust error handling
3. Provide clear rationale for your architectural decisions and agent coordination strategy
4. Design a precise flow showing both sequential dependencies and parallel execution opportunities
Your flows should maximize:
- Efficiency through smart parallelization
- Reliability through thorough error handling
- Clarity through well-structured agent interactions
- Effectiveness through strategic task decomposition
Format your flow using -> for sequential steps and , for parallel execution. Be specific about agent roles and interactions.
""",
        base_model=base_model,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        # Moderate temperature: some creativity in flow design while
        # keeping the structured output stable.
        temperature=0.5,
    )
    return model
def generate_flow(task: str) -> Any:
    """Ask the LLM for a single ``Flow`` plan for *task*, print it, return it.

    Args:
        task: Natural-language task description, including the agent names
            the flow is allowed to reference.

    Returns:
        The structured result produced by the function caller.
    """
    result = setup_model().run(task)
    print(json.dumps(result, indent=4))
    return result
def generate_agent_rearrange(task: str) -> Any:
    """Ask the LLM for a full ``AgentRearrangeBuilder`` swarm design.

    Args:
        task: Natural-language description of the overall system to build.

    Returns:
        The structured multi-team swarm result, after printing it as JSON.
    """
    caller = setup_model(base_model=AgentRearrangeBuilder)
    result = caller.run(task)
    print(json.dumps(result, indent=4))
    return result
if __name__ == "__main__":
    # Demo entry point. The commented-out calls below are alternative
    # example prompts (medical-domain flows); only the hedge-fund swarm
    # example actually runs.
    # Basic patient diagnosis flow
    # generate_flow("Diagnose a patient's symptoms and create a treatment plan. You have 3 agents to use: Diagnostician, Specialist, CareCoordinator")
    # # Complex multi-condition case
    # generate_flow("""Handle a complex patient case with multiple chronic conditions requiring ongoing care coordination.
    # The patient has diabetes, heart disease, and chronic pain.
    # Create a comprehensive diagnosis and treatment plan.
    # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
    # # Emergency trauma case
    # generate_flow("""Process an emergency trauma case requiring rapid diagnosis and immediate intervention.
    # Patient presents with multiple injuries from a car accident.
    # Develop immediate and long-term treatment plans.
    # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
    # # Long-term care planning
    # generate_flow("""Design a 6-month care plan for an elderly patient with declining cognitive function.
    # Include regular assessments, specialist consultations, and family coordination.
    # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
    # # Mental health assessment
    # generate_flow("""Conduct a comprehensive mental health assessment and develop treatment strategy.
    # Patient shows signs of depression and anxiety with possible underlying conditions.
    # Create both immediate intervention and long-term support plans.
    # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # Multi-team swarm generation: produces a full AgentRearrangeBuilder
    # (teams, per-team flows, and an inter-team swarm_flow).
    generate_agent_rearrange(
        """Build a complete automated hedge fund system.
Design and implement a sophisticated trading strategy incorporating multiple asset classes,
risk management protocols, and automated execution systems.
The system should include:
- Market analysis and research capabilities
- Portfolio optimization and risk management
- Automated trade execution and settlement
- Compliance and regulatory monitoring
- Performance tracking and reporting
- Fund operations and administration
Create a comprehensive architecture that integrates all these components into a fully automated system."""
    )

@ -146,7 +146,7 @@ router = SwarmRouter(
# operational_analyst_agent,
],
swarm_type="auto", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
auto_generate_prompts=True,
# auto_generate_prompts=True,
)
# Example usage

@ -92,6 +92,12 @@ Runs multiple agents concurrently with timeout limits.
```python
from swarms import Agent, run_agents_concurrently, run_agents_with_timeout, run_agents_with_different_tasks
from swarm_models import OpenAIChat
model = OpenAIChat(
model_name="gpt-4o-mini",
temperature=0.0
)
# Initialize agents
agents = [

@ -32,7 +32,7 @@ agent = Agent(
# output_type="json",
output_type="json", # "json", "dict", "csv" OR "string" soon "yaml" and
streaming_on=False,
auto_generate_prompt=True,
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
)

@ -0,0 +1,116 @@
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)]
󰂃 10% /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
{
"id": "AutomatedHedgeFundSystem",
"plan": "The goal is to build a fully automated hedge fund system that integrates multiple components including market analysis, portfolio optimization, trade execution, compliance monitoring, performance tracking, and fund operations. The system will be divided into several key modules, each responsible for specific tasks. The overall success will be measured by the system's ability to consistently execute profitable trades, manage risks effectively, comply with regulations, and provide comprehensive reporting. Key milestones include setting up data feeds for market analysis, developing algorithms for portfolio optimization, implementing automated trade execution protocols, and establishing compliance and reporting mechanisms.",
"failures_prediction": "Potential failure modes include incorrect market data leading to poor trading decisions, algorithmic errors in portfolio optimization, failures in trade execution systems, compliance breaches, and inaccurate reporting. To mitigate these risks, robust data validation procedures will be implemented, algorithms will be rigorously tested in simulated environments, trade execution systems will include fail-safes and redundancies, compliance checks will be automated and regularly audited, and reporting systems will include cross-checks and validation processes. Regular monitoring and updates will ensure the system remains reliable and accurate.",
"rationale": "This flow design is optimal because it breaks down the complex task of building an automated hedge fund into manageable components, allowing for specialized agents to focus on distinct functions. Parallelization is maximized where possible, such as in market analysis and portfolio optimization, to increase efficiency and speed. Sequential dependencies ensure that critical tasks like compliance and trade execution follow necessary preparatory steps. This design balances the need for speed in trading with the necessity of thorough analysis and compliance, ensuring both profitability and adherence to regulations.",
"flow": "AgentMarketAnalysis -> AgentPortfolioOptimization, AgentComplianceMonitoring -> AgentTradeExecution -> AgentPerformanceTracking, AgentFundOperations"
}
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)][⏱ 9s]
󰂃 10% /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
{
"name": "Automated Hedge Fund System",
"description": "A fully automated system for managing a hedge fund, integrating market analysis, portfolio optimization, automated trade execution, compliance, and performance tracking.",
"flows": [
{
"id": "Market_Analysis_and_Research",
"plan": "Develop a system that continuously gathers data from multiple financial markets, processes this data to identify trends and patterns, and generates insights for trading strategies. The system should support multiple asset classes including equities, fixed income, commodities, and currencies.",
"failures_prediction": "Data quality issues may arise, leading to inaccurate analysis. To mitigate, implement data validation and cleansing processes. Additionally, ensure redundancy in data sources to prevent single points of failure. Algorithmic biases can distort insights; regular audits and updates to algorithms are necessary.",
"rationale": "Market analysis is foundational to informed trading strategies. By implementing robust data collection and processing, the system ensures timely and accurate insights, which are crucial for competitive advantage in trading.",
"flow": "DataCollector -> DataProcessor -> TrendAnalyzer -> InsightGenerator"
},
{
"id": "Portfolio_Optimization_and_Risk_Management",
"plan": "Implement a system that uses the insights from market analysis to optimize the portfolio. This involves balancing risk and return, adhering to investment guidelines, and dynamically adjusting the portfolio in response to market changes.",
"failures_prediction": "Risk models might not capture extreme market events, leading to unexpected losses. Regular stress testing and scenario analysis are essential. Portfolio rebalancing might incur high transaction costs; optimization algorithms should account for these.",
"rationale": "Effective portfolio management maximizes returns while controlling risk. By continuously optimizing the portfolio, the system can adapt to market conditions and investor goals, ensuring long-term fund performance.",
"flow": "InsightGenerator -> PortfolioOptimizer -> RiskManager -> Rebalancer"
},
{
"id": "Automated_Trade_Execution_and_Settlement",
"plan": "Design a system that executes trades automatically based on portfolio optimization outputs. It should ensure trades are executed at optimal prices and settled efficiently across multiple asset classes.",
"failures_prediction": "Execution failures can occur due to connectivity issues or market volatility. Implement fail-safes such as alternative trading venues and pre-trade checks. Settlement failures require reconciliation processes to ensure all trades are accurately recorded.",
"rationale": "Automation in trade execution reduces latency and human error, ensuring trades are conducted efficiently and at the best possible prices. This is critical for maintaining competitive edge and operational efficiency.",
"flow": "Rebalancer -> TradeExecutor -> SettlementProcessor"
},
{
"id": "Compliance_and_Regulatory_Monitoring",
"plan": "Establish a system that monitors all trading activities for compliance with relevant regulations and internal policies. It should generate alerts for any potential violations and maintain detailed records for audits.",
"failures_prediction": "Non-compliance can lead to legal penalties and reputational damage. Implement real-time monitoring and alert systems, and conduct regular compliance audits to ensure adherence to regulations.",
"rationale": "Regulatory compliance is non-negotiable in financial markets. A robust monitoring system protects the fund from legal risks and maintains investor trust.",
"flow": "TradeExecutor -> ComplianceMonitor -> AlertSystem"
},
{
"id": "Performance_Tracking_and_Reporting",
"plan": "Create a system that tracks the performance of the hedge fund, analyzing returns, risks, and other key metrics. It should generate regular reports for stakeholders, providing insights into fund performance and areas for improvement.",
"failures_prediction": "Inaccurate performance data can mislead stakeholders. Ensure data integrity through validation processes and cross-checks. Reporting delays can frustrate stakeholders; automate report generation to ensure timeliness.",
"rationale": "Performance tracking provides transparency and accountability, essential for stakeholder trust and strategic decision-making. Regular reporting helps in assessing strategy effectiveness and making informed adjustments.",
"flow": "SettlementProcessor -> PerformanceTracker -> ReportGenerator"
},
{
"id": "Fund_Operations_and_Administration",
"plan": "Develop a system that handles the day-to-day operations of the hedge fund, including investor relations, fund accounting, and administrative tasks. Ensure seamless integration with other components of the hedge fund system.",
"failures_prediction": "Operational bottlenecks can disrupt fund activities. Implement workflow automation and task prioritization to enhance efficiency. Ensure data consistency across systems to prevent administrative errors.",
"rationale": "Efficient fund operations are crucial for smooth functioning and scalability of the hedge fund. By automating routine tasks, the system allows for focus on strategic activities and growth.",
"flow": "ReportGenerator -> FundAdministrator -> InvestorRelations"
}
]
}
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)][⏱ 26s]
󰂃 10% /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
{
"name": "Automated Hedge Fund System",
"description": "A comprehensive architecture for a fully automated hedge fund system integrating market analysis, portfolio optimization, automated execution, compliance monitoring, performance tracking, and fund operations.",
"flows": [
{
"id": "Market Analysis and Research",
"plan": "Develop a robust market analysis module that gathers data from multiple sources, processes it using machine learning algorithms, and provides actionable insights. This module will continuously monitor market trends, sentiment, and economic indicators to inform trading strategies.",
"failures_prediction": "Potential failures include data source outages, incorrect data processing, and machine learning model inaccuracies. Mitigation strategies involve using redundant data sources, implementing data validation checks, and continuously updating and retraining models.",
"rationale": "Market analysis is the foundation of a successful trading strategy. By leveraging multiple data sources and advanced algorithms, the system can generate high-quality insights that drive profitable trades. The design prioritizes reliability and accuracy to ensure consistent performance.",
"flow": "DataCollector -> DataProcessor -> MLModelTrainer -> InsightGenerator"
},
{
"id": "Portfolio Optimization and Risk Management",
"plan": "Create a portfolio optimization engine that uses quantitative models to allocate assets efficiently. Integrate risk management protocols to monitor and mitigate exposure to market risks, ensuring the portfolio aligns with the fund's risk appetite and investment goals.",
"failures_prediction": "Risks include model inaccuracies, unexpected market events, and correlation breakdowns. Preventive measures include stress testing models, implementing real-time risk monitoring, and setting predefined risk thresholds with automated rebalancing.",
"rationale": "Optimizing the portfolio is crucial for maximizing returns while controlling risk. By integrating risk management, the system ensures that the portfolio remains resilient to market fluctuations, aligning with overall investment strategies.",
"flow": "PortfolioOptimizer -> RiskAnalyzer -> RiskMitigationEngine"
},
{
"id": "Automated Trade Execution and Settlement",
"plan": "Design an automated trade execution system that interfaces with multiple exchanges, executes trades based on predefined strategies, and handles settlement processes. Ensure the system is capable of high-frequency trading and adapts to market conditions.",
"failures_prediction": "Failures can occur due to exchange connectivity issues, execution delays, or strategy malfunctions. Mitigation involves implementing failover protocols, real-time monitoring of execution quality, and adaptive algorithms that adjust to market conditions.",
"rationale": "Automated execution is essential for capitalizing on market opportunities quickly and efficiently. The system's ability to handle high-frequency trades and adapt to changing conditions is critical for maintaining a competitive edge.",
"flow": "TradeStrategyEngine -> ExecutionManager -> SettlementProcessor"
},
{
"id": "Compliance and Regulatory Monitoring",
"plan": "Implement a compliance monitoring system that tracks all trading activities, ensures adherence to regulations, and generates reports for regulatory bodies. Incorporate automated alerts for any compliance breaches or suspicious activities.",
"failures_prediction": "Potential issues include regulatory changes, false positives in alerts, and reporting errors. Strategies to address these include regular updates to compliance rules, fine-tuning alert thresholds, and automated report validation checks.",
"rationale": "Compliance is non-negotiable in the hedge fund industry. An automated system reduces the risk of human error and ensures that the fund operates within legal boundaries, protecting against fines and reputational damage.",
"flow": "TradeMonitor -> ComplianceChecker -> AlertSystem -> ReportGenerator"
},
{
"id": "Performance Tracking and Reporting",
"plan": "Develop a performance tracking system that evaluates fund performance against benchmarks, generates detailed reports, and provides insights into fund health. Ensure the system supports real-time performance analytics and historical data analysis.",
"failures_prediction": "Challenges include data inaccuracies, benchmark mismatches, and report generation delays. Mitigation involves implementing data validation, aligning benchmarks with investment goals, and optimizing report generation processes.",
"rationale": "Tracking performance is vital for assessing the fund's success and making informed decisions. The system's ability to provide real-time insights and comprehensive reports supports strategic planning and investor communications.",
"flow": "PerformanceAnalyzer -> BenchmarkComparator -> ReportGenerator"
},
{
"id": "Fund Operations and Administration",
"plan": "Create an operations module that handles fund administration tasks such as investor relations, fee calculations, and financial reporting. Ensure seamless integration with other systems for efficient data flow and operations management.",
"failures_prediction": "Risks include operational inefficiencies, data integration issues, and incorrect calculations. Address these by streamlining processes, ensuring robust data integration, and implementing checks for accuracy in calculations.",
"rationale": "Efficient fund operations are essential for smooth day-to-day management and investor satisfaction. By automating administrative tasks, the system reduces manual workload and enhances operational efficiency.",
"flow": "InvestorRelationsManager -> FeeCalculator -> FinancialReportGenerator"
}
],
"swarm_flow": "Market Analysis and Research -> Portfolio Optimization and Risk Management -> Automated Trade Execution and Settlement -> Compliance and Regulatory Monitoring -> Performance Tracking and Reporting -> Fund Operations and Administration"
}
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)][⏱ 32s]
󰂃 9%

@ -77,6 +77,7 @@ swarms-cloud = ">=0.4.4,<5"
aiofiles = "*"
swarm-models = "*"
clusterops = "*"
chromadb = "*"
[tool.poetry.scripts]
swarms = "swarms.cli.main:main"

@ -1,6 +1,12 @@
import concurrent.futures
from dotenv import load_dotenv
load_dotenv()
from swarms.telemetry.bootup import bootup # noqa: E402, F403
from swarms.telemetry.sentry_active import activate_sentry
from swarms.telemetry.sentry_active import (
activate_sentry,
) # noqa: E402
# Use ThreadPoolExecutor to run bootup and activate_sentry concurrently
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
@ -10,8 +16,8 @@ with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
from swarms.agents import * # noqa: E402, F403
from swarms.artifacts import * # noqa: E402, F403
from swarms.prompts import * # noqa: E402, F403
from swarms.schemas import * # noqa: E402, F403
from swarms.structs import * # noqa: E402, F403
from swarms.telemetry import * # noqa: E402, F403
from swarms.tools import * # noqa: E402, F403
from swarms.utils import * # noqa: E402, F403
from swarms.schemas import * # noqa: E402, F403

@ -1,13 +1,12 @@
import os
from typing import Any, Callable, Dict, List, Tuple, Union
import yaml
from dotenv import load_dotenv
from loguru import logger
from typing import Callable, List, Union, Tuple, Dict, Any
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
load_dotenv()
def create_agents_from_yaml(
model: Callable = None,
@ -50,152 +49,168 @@ def create_agents_from_yaml(
FileNotFoundError: If the specified YAML file is not found.
ValueError: If the YAML configuration is invalid or if an invalid return_type is specified.
"""
logger.info(f"Checking if the YAML file {yaml_file} exists...")
if not os.path.exists(yaml_file):
logger.error(f"YAML file {yaml_file} not found.")
raise FileNotFoundError(f"YAML file {yaml_file} not found.")
logger.info(f"Loading YAML file {yaml_file}")
with open(yaml_file, "r") as file:
config = yaml.safe_load(file)
if "agents" not in config:
logger.error(
"The YAML configuration does not contain 'agents'."
)
raise ValueError(
"The YAML configuration does not contain 'agents'."
try:
logger.info(
f"Checking if the YAML file {yaml_file} exists..."
)
agents = []
task_results = []
if not os.path.exists(yaml_file):
logger.error(f"YAML file {yaml_file} not found.")
raise FileNotFoundError(
f"YAML file {yaml_file} not found."
)
# Create agents
for agent_config in config["agents"]:
logger.info(f"Creating agent: {agent_config['agent_name']}")
logger.info(f"Loading YAML file {yaml_file}")
with open(yaml_file, "r") as file:
config = yaml.safe_load(file)
if "system_prompt" not in agent_config:
if "agents" not in config:
logger.error(
f"System prompt is missing for agent: {agent_config['agent_name']}"
"The YAML configuration does not contain 'agents'."
)
raise ValueError(
f"System prompt is missing for agent: {agent_config['agent_name']}"
"The YAML configuration does not contain 'agents'."
)
agent = Agent(
agent_name=agent_config["agent_name"],
system_prompt=agent_config["system_prompt"],
llm=model,
max_loops=agent_config.get("max_loops", 1),
autosave=agent_config.get("autosave", True),
dashboard=agent_config.get("dashboard", False),
verbose=agent_config.get("verbose", False),
dynamic_temperature_enabled=agent_config.get(
"dynamic_temperature_enabled", False
),
saved_state_path=agent_config.get("saved_state_path"),
user_name=agent_config.get("user_name", "default_user"),
retry_attempts=agent_config.get("retry_attempts", 1),
context_length=agent_config.get("context_length", 100000),
return_step_meta=agent_config.get(
"return_step_meta", False
),
output_type=agent_config.get("output_type", "str"),
auto_generate_prompt=agent_config.get(
"auto_generate_prompt", "False"
),
*args,
**kwargs,
)
agents = []
task_results = []
logger.info(
f"Agent {agent_config['agent_name']} created successfully."
)
agents.append(agent)
# Create SwarmRouter if swarm_architecture is present
swarm_router = None
if "swarm_architecture" in config:
swarm_config = config["swarm_architecture"]
swarm_router = SwarmRouter(
name=swarm_config["name"],
description=swarm_config["description"],
max_loops=swarm_config["max_loops"],
agents=agents,
swarm_type=swarm_config["swarm_type"],
task=swarm_config.get("task"),
flow=swarm_config.get("flow"),
autosave=swarm_config.get("autosave"),
return_json=swarm_config.get("return_json"),
*args,
**kwargs,
)
logger.info(
f"SwarmRouter '{swarm_config['name']}' created successfully."
)
# Create agents
for agent_config in config["agents"]:
logger.info(
f"Creating agent: {agent_config['agent_name']}"
)
# Define function to run SwarmRouter
def run_swarm_router(
task: str = (
swarm_config.get("task")
if "swarm_architecture" in config
else None
),
):
if swarm_router:
try:
output = swarm_router.run(task)
print(output)
logger.info(
f"Output for SwarmRouter '{swarm_config['name']}': {output}"
)
return output
except Exception as e:
if "system_prompt" not in agent_config:
logger.error(
f"Error running task for SwarmRouter '{swarm_config['name']}': {e}"
f"System prompt is missing for agent: {agent_config['agent_name']}"
)
raise e
else:
logger.error("SwarmRouter not created.")
raise ValueError("SwarmRouter not created.")
# Handle return types
if return_type == "auto":
if swarm_router:
return swarm_router
elif len(agents) == 1:
return agents[0]
else:
return agents
elif return_type == "swarm":
return (
swarm_router
if swarm_router
else (agents[0] if len(agents) == 1 else agents)
)
elif return_type == "agents":
return agents[0] if len(agents) == 1 else agents
elif return_type == "both":
return (
swarm_router
if swarm_router
else agents[0] if len(agents) == 1 else agents
), agents
elif return_type == "tasks":
if not task_results:
logger.warning(
"No tasks were executed. Returning empty list."
raise ValueError(
f"System prompt is missing for agent: {agent_config['agent_name']}"
)
agent = Agent(
agent_name=agent_config["agent_name"],
system_prompt=agent_config["system_prompt"],
llm=model,
max_loops=agent_config.get("max_loops", 1),
autosave=agent_config.get("autosave", True),
dashboard=agent_config.get("dashboard", False),
verbose=agent_config.get("verbose", False),
dynamic_temperature_enabled=agent_config.get(
"dynamic_temperature_enabled", False
),
saved_state_path=agent_config.get("saved_state_path"),
user_name=agent_config.get(
"user_name", "default_user"
),
retry_attempts=agent_config.get("retry_attempts", 1),
context_length=agent_config.get(
"context_length", 100000
),
return_step_meta=agent_config.get(
"return_step_meta", False
),
output_type=agent_config.get("output_type", "str"),
auto_generate_prompt=agent_config.get(
"auto_generate_prompt", "False"
),
*args,
**kwargs,
)
return task_results
elif return_type == "run_swarm":
if swarm_router:
return run_swarm_router()
else:
logger.error("Cannot run swarm: SwarmRouter not created.")
raise ValueError(
"Cannot run swarm: SwarmRouter not created."
logger.info(
f"Agent {agent_config['agent_name']} created successfully."
)
agents.append(agent)
# Create SwarmRouter if swarm_architecture is present
swarm_router = None
if "swarm_architecture" in config:
swarm_config = config["swarm_architecture"]
swarm_router = SwarmRouter(
name=swarm_config["name"],
description=swarm_config["description"],
max_loops=swarm_config["max_loops"],
agents=agents,
swarm_type=swarm_config["swarm_type"],
task=swarm_config.get("task"),
flow=swarm_config.get("flow"),
autosave=swarm_config.get("autosave"),
return_json=swarm_config.get("return_json"),
*args,
**kwargs,
)
else:
logger.error(f"Invalid return_type: {return_type}")
raise ValueError(f"Invalid return_type: {return_type}")
logger.info(
f"SwarmRouter '{swarm_config['name']}' created successfully."
)
# Define function to run SwarmRouter
def run_swarm_router(
task: str = (
swarm_config.get("task")
if "swarm_architecture" in config
else None
),
):
if swarm_router:
try:
output = swarm_router.run(task)
print(output)
logger.info(
f"Output for SwarmRouter '{swarm_config['name']}': {output}"
)
return output
except Exception as e:
logger.error(
f"Error running task for SwarmRouter '{swarm_config['name']}': {e}"
)
raise e
else:
logger.error("SwarmRouter not created.")
raise ValueError("SwarmRouter not created.")
# Handle return types
if return_type == "auto":
if swarm_router:
return swarm_router
elif len(agents) == 1:
return agents[0]
else:
return agents
elif return_type == "swarm":
return (
swarm_router
if swarm_router
else (agents[0] if len(agents) == 1 else agents)
)
elif return_type == "agents":
return agents[0] if len(agents) == 1 else agents
elif return_type == "both":
return (
swarm_router
if swarm_router
else agents[0] if len(agents) == 1 else agents
), agents
elif return_type == "tasks":
if not task_results:
logger.warning(
"No tasks were executed. Returning empty list."
)
return task_results
elif return_type == "run_swarm":
if swarm_router:
return run_swarm_router()
else:
logger.error(
"Cannot run swarm: SwarmRouter not created."
)
raise ValueError(
"Cannot run swarm: SwarmRouter not created."
)
else:
logger.error(f"Invalid return_type: {return_type}")
raise ValueError(f"Invalid return_type: {return_type}")
except Exception as e:
logger.error(f"An error occurred: {e}")
raise e

@ -59,13 +59,11 @@ from swarms.structs.utils import (
find_token_in_text,
parse_tasks,
)
from swarms.structs.yaml_model import (
YamlModel,
create_yaml_schema_from_dict,
get_type_name,
pydantic_type_to_yaml_schema,
from swarms.structs.swarm_router import (
SwarmRouter,
SwarmType,
swarm_router,
)
from swarms.structs.swarm_router import SwarmRouter, SwarmType
from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.multi_agent_exec import (
run_agents_concurrently,
@ -108,10 +106,6 @@ __all__ = [
"find_agent_by_id",
"find_token_in_text",
"parse_tasks",
"YamlModel",
"create_yaml_schema_from_dict",
"get_type_name",
"pydantic_type_to_yaml_schema",
"MixtureOfAgents",
"GraphWorkflow",
"Node",
@ -148,4 +142,5 @@ __all__ = [
"run_agents_with_different_tasks",
"run_agent_with_timeout",
"run_agents_with_resource_monitoring",
"swarm_router",
]

@ -7,6 +7,7 @@ from pydantic import BaseModel, Field
from typing import Optional
from datetime import datetime
from swarms.schemas.agent_step_schemas import ManySteps
import tenacity
datetime_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
@ -46,6 +47,7 @@ class RoundRobinSwarm(BaseSwarm):
verbose (bool, optional): Flag to enable verbose mode. Defaults to False.
max_loops (int, optional): Maximum number of loops to run. Defaults to 1.
callback (callable, optional): Callback function to be called after each loop. Defaults to None.
return_json_on (bool, optional): Flag to return the metadata as a JSON object. Defaults to False.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
@ -69,39 +71,80 @@ class RoundRobinSwarm(BaseSwarm):
max_loops: int = 1,
callback: callable = None,
return_json_on: bool = False,
max_retries: int = 3,
*args,
**kwargs,
):
super().__init__(
name=name,
description=description,
agents=agents,
*args,
**kwargs,
)
self.name = name
self.description = description
self.agents = agents
self.verbose = verbose
self.max_loops = max_loops
self.callback = callback
self.return_json_on = return_json_on
self.index = 0
# Store the metadata for the run
self.output_schema = MetadataSchema(
name=self.name,
swarm_id=datetime_stamp,
task="",
description=self.description,
agent_outputs=[],
timestamp=datetime_stamp,
max_loops=self.max_loops,
)
# Set the max loops for every agent
for agent in self.agents:
agent.max_loops = random.randint(1, 5)
try:
super().__init__(
name=name,
description=description,
agents=agents,
*args,
**kwargs,
)
self.name = name
self.description = description
self.agents = agents or []
self.verbose = verbose
self.max_loops = max_loops
self.callback = callback
self.return_json_on = return_json_on
self.index = 0
self.max_retries = max_retries
# Store the metadata for the run
self.output_schema = MetadataSchema(
name=self.name,
swarm_id=datetime_stamp,
task="",
description=self.description,
agent_outputs=[],
timestamp=datetime_stamp,
max_loops=self.max_loops,
)
# Set the max loops for every agent
if self.agents:
for agent in self.agents:
agent.max_loops = random.randint(1, 5)
logger.info(
f"Successfully initialized {self.name} with {len(self.agents)} agents"
)
except Exception as e:
logger.error(
f"Failed to initialize {self.name}: {str(e)}"
)
raise
    @tenacity.retry(
        # NOTE(review): the retry policy is bound at class-definition time,
        # so it always stops after 3 attempts and ignores the instance's
        # configurable `max_retries` (set in __init__) — confirm whether a
        # per-instance tenacity.Retrying should be used instead.
        stop=tenacity.stop_after_attempt(3),
        # Exponential backoff between attempts, clamped to the 4-10s range.
        wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
        # Retries on *any* Exception, including programming errors.
        retry=tenacity.retry_if_exception_type(Exception),
        before_sleep=lambda retry_state: logger.info(
            f"Retrying in {retry_state.next_action.sleep} seconds..."
        ),
    )
    def _execute_agent(
        self, agent: Agent, task: str, *args, **kwargs
    ) -> str:
        """Execute a single agent with retries and error handling.

        Runs ``agent.run(task, ...)`` and, on success, appends the agent's
        output record to ``self.output_schema.agent_outputs`` for later
        metadata export.  Failures are logged and re-raised so the tenacity
        decorator above can retry the whole call.

        Args:
            agent (Agent): The agent to execute.
            task (str): The task (or the previous agent's output) to run on.
            *args: Extra positional arguments forwarded to ``agent.run``.
            **kwargs: Extra keyword arguments forwarded to ``agent.run``.

        Returns:
            str: The value returned by ``agent.run``.

        Raises:
            Exception: Whatever ``agent.run`` raised, once retries are
                exhausted.
        """
        try:
            logger.info(
                f"Running Agent {agent.agent_name} on task: {task}"
            )
            result = agent.run(task, *args, **kwargs)
            # Record the agent's structured output only on success.
            self.output_schema.agent_outputs.append(
                agent.agent_output
            )
            return result
        except Exception as e:
            logger.error(
                f"Error executing agent {agent.agent_name}: {str(e)}"
            )
            raise
def run(self, task: str, *args, **kwargs):
"""
@ -116,51 +159,62 @@ class RoundRobinSwarm(BaseSwarm):
Any: The result of the task execution.
Raises:
ValueError: If no agents are configured
Exception: If an exception occurs during task execution.
"""
if not self.agents:
logger.error("No agents configured for the swarm")
raise ValueError("No agents configured for the swarm")
try:
result = task
self.output_schema.task = task
n = len(self.agents)
logger.info(f"Running the task {task} on {n} agents.")
logger.info(
f"Starting round-robin execution with task '{task}' on {n} agents"
)
for loop in range(self.max_loops):
logger.debug(
f"Starting loop {loop + 1}/{self.max_loops}"
)
for _ in range(n):
current_agent = self.agents[self.index]
try:
logger.info(
f"Running Agent {current_agent.agent_name} on task {result}"
)
result = current_agent.run(
result, *args, **kwargs
result = self._execute_agent(
current_agent, result, *args, **kwargs
)
finally:
self.index = (self.index + 1) % n
# Add agent schema to output
self.output_schema.agent_outputs.append(
current_agent.agent_output
)
if self.callback:
logger.debug(
f"Executing callback for loop {loop + 1}"
)
try:
self.callback(loop, result)
except Exception as e:
logger.error(
f"Handling an exception for {current_agent.name}: {e}"
f"Callback execution failed: {str(e)}"
)
raise e
finally:
self.index = (
self.index + 1
) % n # Increment and wrap around the index
if self.callback:
logger.info(
f"Calling the callback function for loop {loop}"
)
self.callback(loop, result)
logger.success(
f"Successfully completed {self.max_loops} loops of round-robin execution"
)
if self.return_json_on:
return self.export_metadata()
else:
return result
return result
except Exception as e:
logger.error(f"An error occurred: {e}")
return e
logger.error(f"Round-robin execution failed: {str(e)}")
raise
def export_metadata(self):
return self.output_schema.model_dump_json(indent=4)
"""Export the execution metadata as JSON"""
try:
return self.output_schema.model_dump_json(indent=4)
except Exception as e:
logger.error(f"Failed to export metadata: {str(e)}")
raise

@ -455,3 +455,81 @@ class SwarmRouter:
]
results = [future.result() for future in futures]
return results
def swarm_router(
    name: str = "swarm-router",
    description: str = "Routes your task to the desired swarm",
    max_loops: int = 1,
    agents: Union[List[Union[Agent, Callable]], None] = None,
    swarm_type: SwarmType = "SequentialWorkflow",  # "SpreadSheetSwarm" # "auto"
    autosave: bool = False,
    flow: str = None,
    return_json: bool = True,
    auto_generate_prompts: bool = False,
    task: str = None,
    *args,
    **kwargs,
) -> SwarmRouter:
    """
    Create and run a SwarmRouter instance with the given configuration.

    Args:
        name (str, optional): Name of the swarm router. Defaults to "swarm-router".
        description (str, optional): Description of the router. Defaults to "Routes your task to the desired swarm".
        max_loops (int, optional): Maximum number of execution loops. Defaults to 1.
        agents (List[Union[Agent, Callable]], optional): List of agents or callables.
            Defaults to None, which is treated as an empty list.
        swarm_type (SwarmType, optional): Type of swarm to use. Defaults to "SequentialWorkflow".
        autosave (bool, optional): Whether to autosave results. Defaults to False.
        flow (str, optional): Flow configuration. Defaults to None.
        return_json (bool, optional): Whether to return results as JSON. Defaults to True.
        auto_generate_prompts (bool, optional): Whether to auto-generate prompts. Defaults to False.
        task (str, optional): Task to execute. Defaults to None.
        *args: Additional positional arguments passed to SwarmRouter.run()
        **kwargs: Additional keyword arguments passed to SwarmRouter.run()

    Returns:
        Any: Result from executing the swarm router.
            NOTE(review): the annotation says ``SwarmRouter`` but the
            function actually returns ``SwarmRouter.run``'s result, as
            documented here — confirm which contract callers rely on.

    Raises:
        ValueError: If invalid arguments are provided
        Exception: If an error occurs during router creation or task execution
    """
    try:
        logger.info(
            f"Creating SwarmRouter with name: {name}, swarm_type: {swarm_type}"
        )
        # Normalize None to a fresh list instead of using a mutable `[]`
        # default, which would be shared across every call of this function.
        agents = agents if agents is not None else []
        if not agents:
            logger.warning(
                "No agents provided, router may have limited functionality"
            )
        if task is None:
            logger.warning("No task provided")

        # Local renamed from `swarm_router` to avoid shadowing this
        # function's own name inside its body.
        router = SwarmRouter(
            name=name,
            description=description,
            max_loops=max_loops,
            agents=agents,
            swarm_type=swarm_type,
            autosave=autosave,
            flow=flow,
            return_json=return_json,
            auto_generate_prompts=auto_generate_prompts,
        )

        logger.info(f"Executing task with SwarmRouter: {task}")
        result = router.run(task, *args, **kwargs)
        logger.info("Task execution completed successfully")
        return result

    except ValueError as e:
        logger.error(
            f"Invalid arguments provided to swarm_router: {str(e)}"
        )
        raise
    except Exception as e:
        logger.error(f"Error in swarm_router execution: {str(e)}")
        raise

@ -1,232 +0,0 @@
import json
from dataclasses import MISSING, fields, is_dataclass
from typing import Any, Dict, Type

import yaml
from pydantic import BaseModel, Field

from swarms.utils.loguru_logger import logger
def get_type_name(typ: Type) -> str:
    """Return a short, human-readable name for *typ*.

    Uses the type's ``__name__`` when it has one (``int`` -> ``"int"``)
    and falls back to ``str(typ)`` for objects without that attribute,
    such as typing generics.
    """
    name = getattr(typ, "__name__", None)
    return name if name is not None else str(typ)
def create_yaml_schema_from_dict(
    data: Dict[str, Any], model_class: Type
) -> str:
    """
    Generate a YAML schema based on a dictionary and a class (can be a
    Pydantic model, regular class, or dataclass).

    Args:
        data: The dictionary with key-value pairs where keys are attribute
            names and values are example data.
        model_class: The class which the data should conform to, used for
            obtaining type information.

    Returns:
        A string containing the YAML schema.

    Example usage:
    >>> data = {'name': 'Alice', 'age': 30, 'is_active': True}
    >>> print(create_yaml_schema_from_dict(data, User))
    """
    schema = {}
    if is_dataclass(model_class):
        for field in fields(model_class):
            # dataclasses marks "no default" with the MISSING sentinel;
            # the previous comparison against field.default_factory could
            # never identify it.
            schema[field.name] = {
                "type": get_type_name(field.type),
                "default": (
                    field.default
                    if field.default is not MISSING
                    else None
                ),
                "description": field.metadata.get(
                    "description", "No description provided"
                ),
            }
    elif isinstance(model_class, type) and issubclass(
        model_class, BaseModel
    ):
        # model_class is a *class*, so it must be checked with issubclass;
        # the previous isinstance(model_class, BaseModel) was never true
        # for a model class, sending Pydantic models to the fallback.
        # Uses the Pydantic v2 fields API, consistent with the rest of
        # this codebase (e.g. model_dump_json elsewhere in the package).
        for field_name, field_info in model_class.model_fields.items():
            schema[field_name] = {
                "type": get_type_name(field_info.annotation),
                # Required fields have no default; report None rather than
                # leaking Pydantic's internal "undefined" sentinel, which
                # yaml.safe_dump cannot serialize.
                "default": (
                    None
                    if field_info.is_required()
                    else field_info.default
                ),
                "description": (
                    field_info.description
                    or "No description provided."
                ),
            }
    else:
        # Fallback for regular classes (non-dataclass, non-Pydantic):
        # infer each attribute's type from the example value itself.
        for attr_name, attr_value in data.items():
            attr_type = type(attr_value)
            schema[attr_name] = {
                "type": get_type_name(attr_type),
                "description": "No description provided",
            }
    return yaml.safe_dump(schema, sort_keys=False)
def pydantic_type_to_yaml_schema(pydantic_type):
    """
    Map Pydantic types to YAML schema types.

    Args:
        pydantic_type (type): The Pydantic type to be mapped.

    Returns:
        str: The corresponding YAML schema type; "string" when no more
        specific mapping is known.
    """
    # Reduce generics to their origin (e.g. List[int] -> list,
    # Dict[str, int] -> dict) so containers map to YAML collection types.
    base = getattr(pydantic_type, "__origin__", None) or pydantic_type
    yaml_types = {
        int: "integer",
        float: "number",
        str: "string",
        bool: "boolean",
        list: "array",
        dict: "object",
    }
    return yaml_types.get(base, "string")
class YamlModel(BaseModel):
    """
    A Pydantic model class for working with YAML data.

    Example usage:
    # Example usage with an extended YamlModel
    >>> class User(YamlModel):
            name: str
            age: int
            is_active: bool

    # Create an instance of the User model
    >>> user = User(name="Alice", age=30, is_active=True)

    # Serialize the User instance to YAML and print it
    >>> print(user.to_yaml())

    # Convert JSON to YAML and print
    >>> json_string = '{"name": "Bob", "age": 25, "is_active": false}'
    >>> print(YamlModel.json_to_yaml(json_string))

    # Save the User instance to a YAML file
    >>> user.save_to_yaml('user.yaml')
    """

    # Free-form payload serialized by to_yaml(); None until provided.
    input_dict: Dict[str, Any] = Field(
        None,
        title="Data",
        description="The data to be serialized to YAML.",
    )

    def to_yaml(self):
        """Serialize the model's ``input_dict`` payload to a YAML string."""
        return yaml.safe_dump(self.input_dict, sort_keys=False)

    def from_yaml(self, cls, yaml_str: str):
        """
        Create an instance of *cls* from a YAML string.

        Args:
            cls: The class to instantiate from the parsed YAML mapping.
            yaml_str (str): The YAML string to parse.

        Returns:
            cls: An instance of the class with attributes populated from
            the YAML data, or None if the data could not be loaded.
        """
        try:
            data = yaml.safe_load(yaml_str)
            return cls(**data)
        except ValueError as error:
            logger.error(f"Error loading YAML data: {error}")
            return None

    @staticmethod
    def json_to_yaml(json_str: str):
        """
        Convert a JSON string to a YAML string.

        Fix: this was previously declared as a ``@staticmethod`` that
        still took ``self`` as its first parameter, so the documented
        call ``YamlModel.json_to_yaml(json_string)`` bound the JSON text
        to ``self`` and raised a TypeError for the missing argument.
        """
        data = json.loads(json_str)  # JSON string -> dictionary
        return yaml.dump(data)

    def save_to_yaml(self, filename: str):
        """
        Save the model instance as a YAML file at *filename*.
        """
        yaml_data = self.to_yaml()
        with open(filename, "w") as file:
            file.write(yaml_data)

    # TODO: Implement a method to create a YAML schema directly from this
    # model's own fields (rather than from an example dict).

    def create_yaml_schema_from_dict(
        self, data: Dict[str, Any], model_class: Type
    ) -> str:
        """
        Generate a YAML schema based on a dictionary and a class (can be a
        Pydantic model, regular class, or dataclass).

        Args:
            data: The dictionary with key-value pairs where keys are
                attribute names and values are example data.
            model_class: The class which the data should conform to, used
                for obtaining type information.

        Returns:
            A string containing the YAML schema.

        Example usage:
        >>> data = {'name': 'Alice', 'age': 30, 'is_active': True}
        """
        # Delegates to the module-level helper of the same name.
        return create_yaml_schema_from_dict(data, model_class)

    def yaml_to_dict(self, yaml_str: str):
        """
        Convert a YAML string to a Python dictionary.
        """
        return yaml.safe_load(yaml_str)

    def dict_to_yaml(self, data: Dict[str, Any]):
        """
        Convert a Python dictionary to a YAML string.
        """
        return yaml.safe_dump(data, sort_keys=False)
# dict = {'name': 'Alice', 'age': 30, 'is_active': True}
# # Comvert the dictionary to a YAML schema dict to yaml
# yaml_model = YamlModel().dict_to_yaml(dict)
# print(yaml_model)

@ -1,9 +1,7 @@
import os
from dotenv import load_dotenv
import sentry_sdk
import threading
load_dotenv()
os.environ["USE_TELEMETRY"] = "True"

@ -3,9 +3,8 @@ import sys
from loguru import logger
from typing import Tuple, Union, List
from e2b_code_interpreter import CodeInterpreter
from dotenv import load_dotenv
load_dotenv()
# load_dotenv()
# Helper function to lazily install the package if not found

@ -1,12 +1,10 @@
import os
import requests
from typing import List, Dict
from dotenv import load_dotenv
def check_bing_api_key():
try:
load_dotenv()
return os.getenv("BING_API_KEY")
except Exception as error:
print(f"Error {error}")

@ -1,11 +1,9 @@
from swarms.utils.loguru_logger import logger
import os
from dotenv import load_dotenv
def try_import_agentops(*args, **kwargs):
try:
load_dotenv()
logger.info("Trying to import agentops")
import agentops

@ -1,8 +1,6 @@
import os
from dotenv import load_dotenv
from loguru import logger
load_dotenv()
WORKSPACE_DIR = os.getenv("WORKSPACE_DIR")

@ -0,0 +1,156 @@
import os
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller
from dotenv import load_dotenv
from typing import Any
from swarms.utils.loguru_logger import logger
from swarms.tools.prebuilt.code_executor import CodeExecutor
load_dotenv()
class Tool(BaseModel):
    """
    Structured-output schema for LLM-driven tool generation.

    The function-calling model (see ``setup_model``) fills one instance
    per request: a short identifier, a plan, a failure analysis, a design
    rationale, and the generated Python source itself in ``code``.
    """

    id: str = Field(
        description="A unique identifier for the task. This should be a short, descriptive name that captures the main purpose of the task. Use - to separate words and make it lowercase."
    )
    plan: str = Field(
        description="The comprehensive plan detailing how the task will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
    )
    failures_prediction: str = Field(
        description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the task. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
    )
    rationale: str = Field(
        description="The detailed reasoning and justification for why this specific task design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
    )
    code: str = Field(
        description="Generate the code for the task. This should be a python function that takes in a task and returns a result. The code should be a complete and working implementation of the task. Include all necessary imports and dependencies and add types, docstrings, and comments to the code. Make sure the main code executes successfully. No placeholders or comments. Make sure the main function executes successfully."
    )
def setup_model(base_model: BaseModel = Tool):
    """
    Build an ``OpenAIFunctionCaller`` configured for code generation.

    Args:
        base_model (BaseModel, optional): Pydantic schema the model's
            structured output must conform to. Defaults to ``Tool``.

    Returns:
        OpenAIFunctionCaller: Caller wired to the ``OPENAI_API_KEY``
        environment variable with a fixed code-generation system prompt
        and temperature 0.5.
    """
    model = OpenAIFunctionCaller(
        system_prompt="""You are an expert Python developer specializing in building reliable API integrations and developer tools. Your role is to generate production-ready code that follows best practices for API interactions and tool development.
When given a task, you will:
1. Design robust error handling and retry mechanisms for API calls
2. Implement proper authentication and security measures
3. Structure code for maintainability and reusability
4. Add comprehensive logging and monitoring
5. Include detailed type hints and documentation
6. Write unit tests to verify functionality
Your code should follow these principles:
- Use modern Python features and idioms
- Handle rate limits and API quotas gracefully
- Validate inputs and outputs thoroughly
- Follow security best practices for API keys and secrets
- Include clear error messages and debugging info
- Be well-documented with docstrings and comments
- Use appropriate design patterns
- Follow PEP 8 style guidelines
The generated code should be complete, tested, and ready for production use. Include all necessary imports, error handling, and helper functions.
""",
        base_model=base_model,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        temperature=0.5,
    )
    return model
def generate_tool(task: str) -> Any:
    """
    Ask the LLM to generate Python code for *task* and extract it.

    Args:
        task (str): Natural-language description of the tool to build.

    Returns:
        Any: The generated code as a string, or "" when the model's
        response has an unexpected type.
    """
    model = setup_model()
    response = model.run(task)
    logger.info(f"Response: {response}")

    # Normalize the three possible response shapes down to raw code.
    if isinstance(response, dict):
        extracted = response.get("code", "")
    elif isinstance(response, Tool):
        extracted = response.code
    elif isinstance(response, str):
        extracted = response
    else:
        logger.error(f"Unexpected response type: {type(response)}")
        return ""

    logger.info(f"Code: {extracted}")
    return extracted
def execute_generated_code(code: str) -> Any:
    """
    Attempts to execute the generated Python code, handling errors.

    SECURITY NOTE: this exec()s model-generated code in-process with no
    sandboxing — only run it on trusted output (the active pipeline uses
    CodeExecutor instead; see retry_until_success).

    Args:
        code (str): The Python code to be executed.

    Returns:
        Any: Output of the code execution, or the exception instance if
        execution fails.
    """
    import inspect  # local: only needed for entry-point discovery

    logger.info("Starting code execution")
    try:
        exec_namespace = {}
        exec(code, exec_namespace)  # noqa: S102 — deliberate, see note above

        # Pick an entry point: prefer a function literally named "main",
        # otherwise the first plain function defined in the namespace.
        # Using inspect.isfunction (rather than callable) avoids mistaking
        # an imported class or builtin for the entry point.
        functions = {
            name: obj
            for name, obj in exec_namespace.items()
            if inspect.isfunction(obj) and not name.startswith("__")
        }
        main_function = functions.get("main") or next(
            iter(functions.values()), None
        )

        if main_function:
            result = main_function()
            logger.info(
                f"Code execution successful. Function result: {result}"
            )
            return result
        if "result" in exec_namespace:
            logger.info(
                f"Code execution successful. Result variable: {exec_namespace['result']}"
            )
            return exec_namespace["result"]
        logger.warning("Code execution completed but no result found")
        return "No result or function found in executed code."
    except Exception as e:
        logger.error(
            f"Code execution failed with error: {str(e)}", exc_info=True
        )
        return e
def retry_until_success(task: str, max_retries: int = 5):
    """
    Generate and execute code until the execution is successful.

    Args:
        task (str): Task description used to generate the required code.
        max_retries (int, optional): How many generate/execute attempts
            to make before giving up. Defaults to 5.
    """
    for attempt in range(max_retries):
        logger.info(f"Attempt {attempt + 1} of {max_retries}")
        tool = generate_tool(task)
        logger.debug(f"Generated code:\n{tool}")
        outcome = CodeExecutor().execute(code=tool)
        logger.info(f"Result: {outcome}")
        if isinstance(outcome, Exception):
            logger.error(
                f"Attempt {attempt + 1} failed: {str(outcome)}"
            )
            print("Retrying with updated code...")
        else:
            logger.info(
                f"Success on attempt {attempt + 1}. Result: {outcome}"
            )
            print(f"Code executed successfully: {outcome}")
            break
    else:
        # for/else: runs only when every attempt failed (no break).
        logger.error("Max retries reached. Execution failed.")
        print("Max retries reached. Execution failed.")
# Usage example — guarded so merely importing this module does not
# immediately call the OpenAI API; run the file directly for the demo.
if __name__ == "__main__":
    retry_until_success(
        "Write a function to fetch and display weather information from a given API."
    )
Loading…
Cancel
Save