parent 02c77356fa
commit 0bc66981fc
@@ -0,0 +1,68 @@
import os

from swarms import Agent

from swarm_models import OpenAIChat
from swarms.structs.agents_available import showcase_available_agents

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the Claims Director agent
director_agent = Agent(
    agent_name="ClaimsDirector",
    agent_description="Oversees and coordinates the medical insurance claims processing workflow",
    system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
    Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
    and accurately while maintaining compliance with insurance policies and regulations.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="director_agent.json",
)

# Initialize Claims Processor agent
processor_agent = Agent(
    agent_name="ClaimsProcessor",
    agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
    system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
    coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="processor_agent.json",
)

# Initialize Claims Auditor agent
auditor_agent = Agent(
    agent_name="ClaimsAuditor",
    agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
    system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
    identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="auditor_agent.json",
)

# Create a list of agents
agents = [director_agent, processor_agent, auditor_agent]

print(showcase_available_agents(agents=agents))
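A minimal usage sketch, assuming the agents above are constructed and OPENAI_API_KEY is set: route one illustrative claim through the director agent with Agent.run, the standard swarms entry point. The claim text below is invented purely for demonstration.

# Illustrative follow-up: ask the director to triage a sample claim
sample_claim = (
    "Claim A-1042: outpatient MRI billed at $1,850 under policy POL-778. "
    "Verify coverage and decide whether it should go to the processor or the auditor first."
)
print(director_agent.run(sample_claim))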
@@ -1,125 +0,0 @@
import os
import json
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller
from dotenv import load_dotenv
from typing import Any, List

load_dotenv()


class Flow(BaseModel):
    id: str = Field(
        description="A unique identifier for the flow. This should be a short, descriptive name that captures the main purpose of the flow. Use - to separate words and make it lowercase."
    )
    plan: str = Field(
        description="The comprehensive plan detailing how the flow will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
    )
    failures_prediction: str = Field(
        description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the flow. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
    )
    rationale: str = Field(
        description="The detailed reasoning and justification for why this specific flow design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
    )
    flow: str = Field(
        description="The precise execution flow defining how agents interact and coordinate. Use -> to indicate sequential processing where one agent must complete before the next begins (e.g. agent1 -> agent2 -> agent3). Use , to indicate parallel execution where multiple agents can run simultaneously (e.g. agent1 -> agent2, agent3, agent4). The flow should clearly show the dependencies and parallelization opportunities between agents. You must only use the agent names provided in the task description; do not make up new agent names and do not use any other formatting."
    )


class AgentRearrangeBuilder(BaseModel):
    name: str = Field(
        description="The name of the swarm. This should be a short, descriptive name that captures the main purpose of the swarm."
    )
    description: str = Field(
        description="A brief description of the swarm. This should be a concise summary of the main purpose of the swarm."
    )
    flows: List[Flow] = Field(
        description="A list of flows that are optimal for the given task. Each flow should include a detailed plan, failure prediction, rationale, and execution flow."
    )
    swarm_flow: str = Field(
        description="The flow defining how each team should communicate and coordinate with each other. Use -> to indicate sequential processing where one id must complete before the next begins (e.g. team1 -> team2 -> team3). Use , to indicate parallel execution where multiple teams can run simultaneously (e.g. team1 -> team2, team3, team4). The flow should clearly show the dependencies and parallelization opportunities between teams. You must only use the team names provided in the id; do not make up new team names and do not use any other formatting."
    )


# def flow_generator(task: str) -> Flow:


def setup_model(base_model: BaseModel = Flow):
    model = OpenAIFunctionCaller(
        system_prompt="""You are an expert flow architect specializing in designing multi-agent workflows. Your role is to analyze tasks and create optimal execution flows that coordinate multiple AI agents effectively.

When given a task, you will:
1. Develop a comprehensive plan breaking down the task into logical steps
2. Carefully consider potential failure modes and build in robust error handling
3. Provide clear rationale for your architectural decisions and agent coordination strategy
4. Design a precise flow showing both sequential dependencies and parallel execution opportunities

Your flows should maximize:
- Efficiency through smart parallelization
- Reliability through thorough error handling
- Clarity through well-structured agent interactions
- Effectiveness through strategic task decomposition

Format your flow using -> for sequential steps and , for parallel execution. Be specific about agent roles and interactions.
""",
        base_model=base_model,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        temperature=0.5,
    )
    return model


def generate_flow(task: str) -> Any:
    model = setup_model()
    flow = model.run(task)
    print(json.dumps(flow, indent=4))
    return flow


def generate_agent_rearrange(task: str) -> Any:
    model = setup_model(base_model=AgentRearrangeBuilder)
    flow = model.run(task)
    print(json.dumps(flow, indent=4))
    return flow


if __name__ == "__main__":
    # Basic patient diagnosis flow
    # generate_flow("Diagnose a patient's symptoms and create a treatment plan. You have 3 agents to use: Diagnostician, Specialist, CareCoordinator")

    # Complex multi-condition case
    # generate_flow("""Handle a complex patient case with multiple chronic conditions requiring ongoing care coordination.
    #     The patient has diabetes, heart disease, and chronic pain.
    #     Create a comprehensive diagnosis and treatment plan.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # Emergency trauma case
    # generate_flow("""Process an emergency trauma case requiring rapid diagnosis and immediate intervention.
    #     Patient presents with multiple injuries from a car accident.
    #     Develop immediate and long-term treatment plans.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # Long-term care planning
    # generate_flow("""Design a 6-month care plan for an elderly patient with declining cognitive function.
    #     Include regular assessments, specialist consultations, and family coordination.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # Mental health assessment
    # generate_flow("""Conduct a comprehensive mental health assessment and develop treatment strategy.
    #     Patient shows signs of depression and anxiety with possible underlying conditions.
    #     Create both immediate intervention and long-term support plans.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    generate_agent_rearrange(
        """Build a complete automated hedge fund system.
        Design and implement a sophisticated trading strategy incorporating multiple asset classes,
        risk management protocols, and automated execution systems.
        The system should include:
        - Market analysis and research capabilities
        - Portfolio optimization and risk management
        - Automated trade execution and settlement
        - Compliance and regulatory monitoring
        - Performance tracking and reporting
        - Fund operations and administration
        Create a comprehensive architecture that integrates all these components into a fully automated system."""
    )
@@ -0,0 +1,99 @@
import os

from swarm_models import OpenAIChat

from swarms import Agent, run_agents_with_tasks_concurrently

# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize agents for different roles
delaware_ccorp_agent = Agent(
    agent_name="Delaware-CCorp-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for a Delaware C Corporation,
    including all relevant laws and regulations, such as the Delaware General
    Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
    covers the requirements for hiring employees, contractors, and officers,
    including the necessary paperwork, tax obligations, and benefits. Also,
    outline the procedures for compliance with Delaware's employment laws,
    including anti-discrimination laws, workers' compensation, and unemployment
    insurance. Provide guidance on how to navigate the complexities of Delaware's
    corporate law and ensure that all hiring practices are in compliance with
    state and federal regulations.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="delaware_ccorp_hiring_description.md",
    artifacts_file_extension=".md",
)

indian_foreign_agent = Agent(
    agent_name="Indian-Foreign-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for an Indian or foreign country,
    including all relevant laws and regulations, such as the Indian Contract Act,
    the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
    Ensure the description covers the requirements for hiring employees,
    contractors, and officers, including the necessary paperwork, tax obligations,
    and benefits. Also, outline the procedures for compliance with Indian and
    foreign employment laws, including anti-discrimination laws, workers'
    compensation, and unemployment insurance. Provide guidance on how to navigate
    the complexities of Indian and foreign corporate law and ensure that all hiring
    practices are in compliance with state and federal regulations. Consider the
    implications of hiring foreign nationals and the requirements for obtaining
    necessary visas and work permits.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="indian_foreign_hiring_description.md",
    artifacts_file_extension=".md",
)

# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
    """
    Create a comprehensive hiring description for an Agent Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in AI/ML frameworks,
    programming languages, and data structures. Outline the key responsibilities,
    including designing and developing AI agents, integrating with existing systems,
    and ensuring scalability and performance.
    """,
    """
    Generate a detailed job description for a Prompt Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in natural language processing,
    machine learning, and software development. Outline the key responsibilities,
    including designing and optimizing prompts for AI systems, ensuring prompt
    quality and consistency, and collaborating with cross-functional teams.
    """,
]

# Run agents with tasks concurrently
results = run_agents_with_tasks_concurrently(
    agents,
    tasks,
    all_cores=True,
    device="cpu",
)

# Print the results
for result in results:
    print(result)
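A small optional follow-up sketch: label each output with the agent that produced it, under the assumption (not stated in this file) that run_agents_with_tasks_concurrently returns results in the same order as the agents and tasks lists.

# Illustrative only: pair each result with its agent
for agent, result in zip(agents, results):
    print(f"--- {agent.agent_name} ---")
    print(result)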
@@ -0,0 +1,117 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the SequentialWorkflow
router = SequentialWorkflow(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ],
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for series A startups? Provide links and references."
    )
    print(result)
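An optional alternative prompt, sketched for illustration only: a task closer to the workflow's stated purpose of private equity document analysis. The wording is hypothetical.

# Illustrative alternative run (commented out):
# result = router.run(
#     "From the attached CIM, extract the key financial metrics, summarize the "
#     "management discussion, and flag the main market and operational risks."
# )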
@@ -0,0 +1,93 @@
from typing import List, Any
from loguru import logger
from swarms.structs.agent import Agent


def get_agent_name(agent: Any) -> str:
    """Helper function to safely get agent name

    Args:
        agent (Any): The agent object to get name from

    Returns:
        str: The agent's name if found, 'Unknown' otherwise
    """
    if isinstance(agent, Agent) and hasattr(agent, "agent_name"):
        return agent.agent_name
    return "Unknown"


def get_agent_description(agent: Any) -> str:
    """Helper function to get agent description or system prompt preview

    Args:
        agent (Any): The agent object

    Returns:
        str: Description, or the first 150 characters of the system prompt
    """
    if not isinstance(agent, Agent):
        return "N/A"

    if hasattr(agent, "description") and agent.description:
        return agent.description

    if hasattr(agent, "system_prompt") and agent.system_prompt:
        return f"{agent.system_prompt[:150]}..."

    return "N/A"


def showcase_available_agents(
    name: str = None,
    description: str = None,
    agents: List[Agent] = [],
    update_agents_on: bool = False,
) -> str:
    """
    Generate a formatted string showcasing all available agents and their descriptions.

    Args:
        name (str, optional): Name of the swarm, shown in the header.
        description (str, optional): Description of the swarm, shown in the header.
        agents (List[Agent]): List of Agent objects to showcase.
        update_agents_on (bool, optional): If True, updates each agent's system prompt with
            the showcase information. Defaults to False.

    Returns:
        str: Formatted string containing agent information, including names, descriptions
            and IDs for all available agents.
    """
    logger.info(f"Showcasing {len(agents)} available agents")

    formatted_agents = []
    header = f"\n####### Agents available in the swarm: {name} ############\n"
    header += f"{description}\n"
    row_format = "{:<5} | {:<20} | {:<50}"
    header_row = row_format.format("ID", "Agent Name", "Description")
    separator = "-" * 80

    formatted_agents.append(header)
    formatted_agents.append(separator)
    formatted_agents.append(header_row)
    formatted_agents.append(separator)

    for idx, agent in enumerate(agents):
        if not isinstance(agent, Agent):
            logger.warning(
                f"Skipping non-Agent object: {type(agent)}"
            )
            continue

        agent_name = get_agent_name(agent)
        description = (
            get_agent_description(agent)[:100] + "..."
            if len(get_agent_description(agent)) > 100
            else get_agent_description(agent)
        )

        formatted_agents.append(
            row_format.format(idx + 1, agent_name, description)
        )

    showcase = "\n".join(formatted_agents)

    return showcase
File diff suppressed because it is too large
@@ -0,0 +1,141 @@
from typing import Any, List, Optional, Union
from pathlib import Path
from loguru import logger
from doc_master import doc_master
from concurrent.futures import ThreadPoolExecutor, as_completed
from tenacity import retry, stop_after_attempt, wait_exponential


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def _process_document(doc_path: Union[str, Path]) -> str:
    """Safely process a single document with retries.

    Args:
        doc_path: Path to the document to process

    Returns:
        Processed document text

    Raises:
        Exception: If document processing fails after retries
    """
    try:
        return doc_master(
            file_path=str(doc_path), output_type="string"
        )
    except Exception as e:
        logger.error(
            f"Error processing document {doc_path}: {str(e)}"
        )
        raise


def handle_input_docs(
    agents: Any,
    docs: Optional[List[Union[str, Path]]] = None,
    doc_folder: Optional[Union[str, Path]] = None,
    max_workers: int = 4,
    chunk_size: int = 1000000,
) -> Any:
    """
    Add document content to agent prompts with improved reliability and performance.

    Args:
        agents: Dictionary mapping agent names to Agent objects
        docs: List of document paths
        doc_folder: Path to folder containing documents
        max_workers: Maximum number of parallel document processing workers
        chunk_size: Maximum characters to process at once to avoid memory issues

    Returns:
        The agents mapping with document content appended to each agent's system prompt.
        If no agents or no documents are provided, a warning is logged and the agents
        are returned unchanged.

    Raises:
        RuntimeError: If document processing fails
    """
    if not agents:
        logger.warning(
            "No agents provided, skipping document distribution"
        )
        return agents

    if not docs and not doc_folder:
        logger.warning(
            "No documents or folder provided, skipping document distribution"
        )
        return agents

    logger.info("Starting document distribution to agents")

    try:
        processed_docs = []

        # Process individual documents in parallel
        if docs:
            with ThreadPoolExecutor(
                max_workers=max_workers
            ) as executor:
                future_to_doc = {
                    executor.submit(_process_document, doc): doc
                    for doc in docs
                }

                for future in as_completed(future_to_doc):
                    doc = future_to_doc[future]
                    try:
                        processed_docs.append(future.result())
                    except Exception as e:
                        logger.error(
                            f"Failed to process document {doc}: {str(e)}"
                        )
                        raise RuntimeError(
                            f"Document processing failed: {str(e)}"
                        )

        # Process folder if specified
        elif doc_folder:
            try:
                folder_content = doc_master(
                    folder_path=str(doc_folder), output_type="string"
                )
                processed_docs.append(folder_content)
            except Exception as e:
                logger.error(
                    f"Failed to process folder {doc_folder}: {str(e)}"
                )
                raise RuntimeError(
                    f"Folder processing failed: {str(e)}"
                )

        # Combine and chunk the processed documents
        combined_data = "\n".join(processed_docs)

        # Update agent prompts in chunks to avoid memory issues
        for agent in agents.values():
            try:
                for i in range(0, len(combined_data), chunk_size):
                    chunk = combined_data[i : i + chunk_size]
                    if i == 0:
                        agent.system_prompt += (
                            "\nDocuments:\n" + chunk
                        )
                    else:
                        agent.system_prompt += chunk
            except Exception as e:
                logger.error(
                    f"Failed to update agent prompt: {str(e)}"
                )
                raise RuntimeError(
                    f"Agent prompt update failed: {str(e)}"
                )

        logger.info(
            f"Successfully added documents to {len(agents)} agents"
        )

        return agents

    except Exception as e:
        logger.error(f"Document distribution failed: {str(e)}")
        raise RuntimeError(f"Document distribution failed: {str(e)}")
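A usage sketch under assumptions: agents is expected to be a dict keyed by agent name (the function iterates agents.values()), and the agent variables and file paths below are placeholders.

# Illustrative only (placeholder paths and agent variables):
# agents = handle_input_docs(
#     agents={"analyst": financial_analyst_agent, "summarizer": summarizer_agent},
#     docs=["./data/report_q3.pdf", "./data/term_sheet.docx"],
#     max_workers=2,
# )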
@@ -0,0 +1,102 @@
from typing import Union, Dict, List, Tuple, Any


def any_to_str(data: Union[str, Dict, List, Tuple, Any]) -> str:
    """Convert any input data type to a nicely formatted string.

    This function handles conversion of various Python data types into a clean string representation.
    It recursively processes nested data structures and handles None values gracefully.

    Args:
        data: Input data of any type to convert to string. Can be:
            - Dictionary
            - List/Tuple
            - String
            - None
            - Any other type that can be converted via str()

    Returns:
        str: A formatted string representation of the input data.
            - Dictionaries are formatted as "key: value" pairs, one per line
            - Lists are wrapped in [...] and tuples in (...), comma-separated
            - None is rendered as the string "None"
            - Strings are wrapped in double quotes; other types use str()

    Examples:
        >>> print(any_to_str({'a': 1, 'b': 2}))
        a: 1
        b: 2
        >>> any_to_str([1, 2, 3])
        '[1, 2, 3]'
        >>> any_to_str(None)
        'None'
    """
    try:
        if isinstance(data, dict):
            # Format dictionary as "key: value" lines, recursing into values
            items = []
            for k, v in data.items():
                value = any_to_str(v)
                items.append(f"{k}: {value}")
            return "\n".join(items)

        elif isinstance(data, (list, tuple)):
            # Format sequences with brackets and proper spacing
            items = [any_to_str(x) for x in data]
            if len(items) == 0:
                return "[]" if isinstance(data, list) else "()"
            return (
                f"[{', '.join(items)}]"
                if isinstance(data, list)
                else f"({', '.join(items)})"
            )

        elif data is None:
            return "None"

        else:
            # Handle strings and other types
            if isinstance(data, str):
                return f'"{data}"'
            return str(data)

    except Exception as e:
        return f"Error converting data: {str(e)}"


def main():
    # Example 1: Dictionary
    print("Dictionary:")
    print(
        any_to_str(
            {
                "name": "John",
                "age": 30,
                "hobbies": ["reading", "hiking"],
            }
        )
    )

    print("\nNested Dictionary:")
    print(
        any_to_str(
            {
                "user": {
                    "id": 123,
                    "details": {"city": "New York", "active": True},
                },
                "data": [1, 2, 3],
            }
        )
    )

    print("\nList and Tuple:")
    print(any_to_str([1, "text", None, (1, 2)]))
    print(any_to_str((True, False, None)))

    print("\nEmpty Collections:")
    print(any_to_str([]))
    print(any_to_str({}))


if __name__ == "__main__":
    main()
@@ -0,0 +1,34 @@
from typing import Union, Dict, List
from swarms.artifacts.main_artifact import Artifact


def handle_artifact_outputs(
    file_path: str,
    data: Union[str, Dict, List],
    output_type: str = "txt",
    folder_path: str = "./artifacts",
) -> str:
    """
    Handle different types of data and create files in various formats.

    Args:
        file_path: Path where the file should be saved
        data: Input data that can be string, dict or list
        output_type: Type of output file (txt, md, pdf, csv, json)
        folder_path: Folder to save artifacts

    Returns:
        str: Path to the created file
    """
    # Create artifact with appropriate file type
    artifact = Artifact(
        folder_path=folder_path,
        file_path=file_path,
        file_type=output_type,
        contents=data,
        edit_count=0,
    )

    # Save the file in the requested format
    # artifact.save()
    artifact.save_as(output_format=output_type)

    # Return the path so callers can locate the saved artifact,
    # matching the declared return type
    return file_path
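A brief usage sketch with illustrative values only: persist a small dictionary of findings as a JSON artifact under the default ./artifacts folder.

# Illustrative only:
# saved_path = handle_artifact_outputs(
#     file_path="claims_summary.json",
#     data={"claim_id": "A-1042", "status": "approved"},
#     output_type="json",
# )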
@@ -0,0 +1,78 @@
from loguru import logger
from typing import List, Union, Callable, Optional
from swarms.structs.agent import Agent


def reliability_check(
    agents: List[Union[Agent, Callable]],
    max_loops: int,
    name: Optional[str] = None,
    description: Optional[str] = None,
    flow: Optional[str] = None,
) -> None:
    """
    Performs reliability checks on swarm configuration parameters.

    Args:
        agents: List of Agent objects or callables that will be executed
        max_loops: Maximum number of execution loops
        name: Name identifier for the swarm
        description: Description of the swarm's purpose
        flow: Flow string defining how the agents are ordered and coordinated

    Raises:
        ValueError: If any parameters fail validation checks
        TypeError: If parameters are of incorrect type
    """
    logger.info("Initializing swarm reliability checks")

    # Type checking
    if not isinstance(agents, list):
        raise TypeError("agents parameter must be a list")

    if not isinstance(max_loops, int):
        raise TypeError("max_loops must be an integer")

    # Validate agents
    if not agents:
        raise ValueError("Agents list cannot be empty")

    for i, agent in enumerate(agents):
        if not isinstance(agent, (Agent, Callable)):
            raise TypeError(
                f"Agent at index {i} must be an Agent instance or Callable"
            )

    # Validate max_loops
    if max_loops <= 0:
        raise ValueError("max_loops must be greater than 0")

    if max_loops > 1000:
        logger.warning(
            "Large max_loops value detected. This may impact performance."
        )

    # Validate name
    if name is None:
        raise ValueError("name parameter is required")
    if not isinstance(name, str):
        raise TypeError("name must be a string")
    if len(name.strip()) == 0:
        raise ValueError("name cannot be empty or just whitespace")

    # Validate description
    if description is None:
        raise ValueError("description parameter is required")
    if not isinstance(description, str):
        raise TypeError("description must be a string")
    if len(description.strip()) == 0:
        raise ValueError(
            "description cannot be empty or just whitespace"
        )

    # Validate flow
    if flow is None:
        raise ValueError("flow parameter is required")
    if not isinstance(flow, str):
        raise TypeError("flow must be a string")

    logger.info("All reliability checks passed successfully")
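A usage sketch with hypothetical values: validating a two-agent configuration before running it. The agent variables and flow string are illustrative and not defined in this file.

# Illustrative only:
# reliability_check(
#     agents=[director_agent, processor_agent],
#     max_loops=1,
#     name="claims-swarm",
#     description="Processes and audits medical insurance claims",
#     flow="ClaimsDirector -> ClaimsProcessor",
# )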
@@ -0,0 +1,77 @@
import os
from typing import Any

from clusterops import (
    execute_on_gpu,
    execute_on_multiple_gpus,
    execute_with_cpu_cores,
    list_available_gpus,
)
from loguru import logger


def exec_callable_with_clusterops(
    device: str = "cpu",
    device_id: int = 0,
    all_cores: bool = True,
    all_gpus: bool = False,
    func: callable = None,
    *args,
    **kwargs,
) -> Any:
    """
    Executes a given function on a specified device, either CPU or GPU.

    This method attempts to execute a given function on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`, or all available GPUs when `all_gpus` is True.

    Args:
        device (str, optional): The device to use for execution. Defaults to "cpu".
        device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
        all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
        all_gpus (bool, optional): If True, uses all available GPUs. Defaults to False.
        func (callable): The function to execute.
        *args: Additional positional arguments to be passed to the execution method.
        **kwargs: Additional keyword arguments to be passed to the execution method.

    Returns:
        Any: The result of the execution.

    Raises:
        ValueError: If an invalid device is specified.
        Exception: If any other error occurs during execution.
    """
    try:
        logger.info(f"Attempting to run on device: {device}")
        if device == "cpu":
            logger.info("Device set to CPU")
            if all_cores is True:
                count = os.cpu_count()
                logger.info(f"Using all available CPU cores: {count}")
            else:
                count = device_id
                logger.info(f"Using specific CPU core: {count}")

            return execute_with_cpu_cores(
                count, func, *args, **kwargs
            )

        # If device gpu and all GPUs requested; checked before the
        # single-GPU branch so this case is reachable
        elif device == "gpu" and all_gpus is True:
            logger.info("Device set to GPU and running all gpus")
            gpus = [int(gpu) for gpu in list_available_gpus()]
            return execute_on_multiple_gpus(
                gpus, func, *args, **kwargs
            )

        # If device gpu
        elif device == "gpu":
            logger.info("Device set to GPU")
            return execute_on_gpu(device_id, func, *args, **kwargs)

        else:
            raise ValueError(
                f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'."
            )
    except ValueError as e:
        logger.error(f"Invalid device specified: {e}")
        raise e
    except Exception as e:
        logger.error(f"An error occurred during execution: {e}")
        raise e
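A minimal usage sketch, assuming clusterops forwards positional arguments through to the callable: run a plain function on all CPU cores via the wrapper.

# Illustrative only:
# def add(a: int, b: int) -> int:
#     return a + b
#
# result = exec_callable_with_clusterops("cpu", 0, True, False, add, 2, 3)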
@@ -1,170 +0,0 @@
import os
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller
from dotenv import load_dotenv
from typing import Any
from swarms.utils.loguru_logger import logger
from swarms.tools.prebuilt.code_executor import CodeExecutor

load_dotenv()


class Tool(BaseModel):
    id: str = Field(
        description="A unique identifier for the task. This should be a short, descriptive name that captures the main purpose of the task. Use - to separate words and make it lowercase."
    )
    plan: str = Field(
        description="The comprehensive plan detailing how the task will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
    )
    failures_prediction: str = Field(
        description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the task. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
    )
    rationale: str = Field(
        description="The detailed reasoning and justification for why this specific task design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
    )
    code: str = Field(
        description="Generate the code for the task. This should be a python function that takes in a task and returns a result. The code should be a complete and working implementation of the task. Include all necessary imports and dependencies and add types, docstrings, and comments to the code. Make sure the main code executes successfully. No placeholders or comments. Make sure the main function executes successfully."
    )


def setup_model(base_model: BaseModel = Tool):
    model = OpenAIFunctionCaller(
        system_prompt="""You are an expert Python developer specializing in building reliable API integrations and developer tools. Your role is to generate production-ready code that follows best practices for API interactions and tool development.

When given a task, you will:
1. Design robust error handling and retry mechanisms for API calls
2. Implement proper authentication and security measures
3. Structure code for maintainability and reusability
4. Add comprehensive logging and monitoring
5. Include detailed type hints and documentation
6. Write unit tests to verify functionality

Your code should follow these principles:
- Use modern Python features and idioms
- Handle rate limits and API quotas gracefully
- Validate inputs and outputs thoroughly
- Follow security best practices for API keys and secrets
- Include clear error messages and debugging info
- Be well-documented with docstrings and comments
- Use appropriate design patterns
- Follow PEP 8 style guidelines

The generated code should be complete, tested, and ready for production use. Include all necessary imports, error handling, and helper functions.
""",
        base_model=base_model,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        temperature=0.5,
    )
    return model


def generate_tool(task: str) -> Any:
    model = setup_model()
    response = model.run(task)
    logger.info(f"Response: {response}")

    # If response is a dict, get code directly
    if isinstance(response, dict):
        # return response.get("code", "")
        code = response.get("code", "")
        logger.info(f"Code: {code}")
        return code
    # If response is a Tool object, access code attribute
    elif isinstance(response, Tool):
        code = response.code
        logger.info(f"Code: {code}")
        return code
    # If response is a string (raw code)
    elif isinstance(response, str):
        code = response
        logger.info(f"Code: {code}")
        return code
    logger.error(f"Unexpected response type: {type(response)}")
    return ""


def execute_generated_code(code: str) -> Any:
    """
    Attempts to execute the generated Python code, handling errors and retrying if necessary.

    Args:
        code (str): The Python code to be executed.

    Returns:
        Any: Output of the code execution, or error details if execution fails.
    """
    logger.info("Starting code execution")
    try:
        exec_namespace = {}
        exec(code, exec_namespace)

        # Check for any callable functions in the namespace
        main_function = None
        for item in exec_namespace.values():
            if callable(item) and not item.__name__.startswith("__"):
                main_function = item
                break

        if main_function:
            result = main_function()
            logger.info(
                f"Code execution successful. Function result: {result}"
            )
            return result
        elif "result" in exec_namespace:
            logger.info(
                f"Code execution successful. Result variable: {exec_namespace['result']}"
            )
            return exec_namespace["result"]
        else:
            logger.warning(
                "Code execution completed but no result found"
            )
            return "No result or function found in executed code."
    except Exception as e:
        logger.error(
            f"Code execution failed with error: {str(e)}",
            exc_info=True,
        )
        return e


def retry_until_success(task: str, max_retries: int = 5):
    """
    Generates and executes code until the execution is successful.

    Args:
        task (str): Task description to generate the required code.
    """
    attempts = 0

    while attempts < max_retries:
        logger.info(f"Attempt {attempts + 1} of {max_retries}")
        tool = generate_tool(task)
        logger.debug(f"Generated code:\n{tool}")

        # result = execute_generated_code(tool)
        result = CodeExecutor().execute(code=tool)
        logger.info(f"Result: {result}")

        if isinstance(result, Exception):
            logger.error(
                f"Attempt {attempts + 1} failed: {str(result)}"
            )
            print("Retrying with updated code...")
            attempts += 1
        else:
            logger.info(
                f"Success on attempt {attempts + 1}. Result: {result}"
            )
            print(f"Code executed successfully: {result}")
            break
    else:
        logger.error("Max retries reached. Execution failed.")
        print("Max retries reached. Execution failed.")


# Usage
retry_until_success(
    "Write a function to fetch and display weather information from a given API."
)