commit cf150ffe84
@@ -1,96 +0,0 @@

import os

from swarm_models import OpenAIChat

from swarms import Agent, run_agents_with_tasks_concurrently

# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize agents for different roles
delaware_ccorp_agent = Agent(
    agent_name="Delaware-CCorp-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for a Delaware C Corporation,
    including all relevant laws and regulations, such as the Delaware General
    Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
    covers the requirements for hiring employees, contractors, and officers,
    including the necessary paperwork, tax obligations, and benefits. Also,
    outline the procedures for compliance with Delaware's employment laws,
    including anti-discrimination laws, workers' compensation, and unemployment
    insurance. Provide guidance on how to navigate the complexities of Delaware's
    corporate law and ensure that all hiring practices are in compliance with
    state and federal regulations.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="delaware_ccorp_hiring_description.md",
    artifacts_file_extension=".md",
)

indian_foreign_agent = Agent(
    agent_name="Indian-Foreign-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for an Indian or foreign country,
    including all relevant laws and regulations, such as the Indian Contract Act,
    the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
    Ensure the description covers the requirements for hiring employees,
    contractors, and officers, including the necessary paperwork, tax obligations,
    and benefits. Also, outline the procedures for compliance with Indian and
    foreign employment laws, including anti-discrimination laws, workers'
    compensation, and unemployment insurance. Provide guidance on how to navigate
    the complexities of Indian and foreign corporate law and ensure that all hiring
    practices are in compliance with state and federal regulations. Consider the
    implications of hiring foreign nationals and the requirements for obtaining
    necessary visas and work permits.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="indian_foreign_hiring_description.md",
    artifacts_file_extension=".md",
)

# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
    """
    Create a comprehensive hiring description for an Agent Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in AI/ML frameworks,
    programming languages, and data structures. Outline the key responsibilities,
    including designing and developing AI agents, integrating with existing systems,
    and ensuring scalability and performance.
    """,
    """
    Generate a detailed job description for a Prompt Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in natural language processing,
    machine learning, and software development. Outline the key responsibilities,
    including designing and optimizing prompts for AI systems, ensuring prompt
    quality and consistency, and collaborating with cross-functional teams.
    """,
]

# Run agents with tasks concurrently
results = run_agents_with_tasks_concurrently(
    agents, tasks, all_cores=True, device="cpu", no_clusterops=True
)

# Print the results
# for result in results:
#     print(result)

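A minimal sketch of consuming the returned results, assuming (not confirmed by this file) that run_agents_with_tasks_concurrently returns one output per task in the same order as tasks and that each Agent exposes the agent_name it was constructed with:

# Sketch only: pairing of agents, tasks, and results assumes positional order.
for agent, task, result in zip(agents, tasks, results):
    print(f"=== {agent.agent_name} ===")
    print(result)
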
@@ -1,265 +0,0 @@

import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the gatekeeper agent
gatekeeper_agent = Agent(
    agent_name="HealthScoreGatekeeper",
    system_prompt="""
    <role>
        <title>Health Score Privacy Gatekeeper</title>
        <primary_responsibility>Protect and manage sensitive health information while providing necessary access to authorized agents</primary_responsibility>
    </role>

    <capabilities>
        <security>
            <encryption>Manage encryption of health scores</encryption>
            <access_control>Implement strict access control mechanisms</access_control>
            <audit>Track and log all access requests</audit>
        </security>
        <data_handling>
            <anonymization>Remove personally identifiable information</anonymization>
            <transformation>Convert raw health data into privacy-preserving formats</transformation>
        </data_handling>
    </capabilities>

    <protocols>
        <data_access>
            <verification>
                <step>Verify agent authorization level</step>
                <step>Check request legitimacy</step>
                <step>Validate purpose of access</step>
            </verification>
            <response_format>
                <health_score>Numerical value only</health_score>
                <metadata>Anonymized timestamp and request ID</metadata>
            </response_format>
        </data_access>
        <privacy_rules>
            <patient_data>Never expose patient names or identifiers</patient_data>
            <health_history>No access to historical data without explicit authorization</health_history>
            <aggregation>Provide only aggregated or anonymized data when possible</aggregation>
        </privacy_rules>
    </protocols>

    <compliance>
        <standards>
            <hipaa>Maintain HIPAA compliance</hipaa>
            <gdpr>Follow GDPR guidelines for data protection</gdpr>
        </standards>
        <audit_trail>
            <logging>Record all data access events</logging>
            <monitoring>Track unusual access patterns</monitoring>
        </audit_trail>
    </compliance>
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="gatekeeper_agent.json",
)

# Initialize the boss agent (Director)
boss_agent = Agent(
    agent_name="BossAgent",
    system_prompt="""
    <role>
        <title>Swarm Director</title>
        <purpose>Orchestrate and manage agent collaboration while respecting privacy boundaries</purpose>
    </role>

    <responsibilities>
        <coordination>
            <task_management>Assign and prioritize tasks</task_management>
            <workflow_optimization>Ensure efficient collaboration</workflow_optimization>
            <privacy_compliance>Maintain privacy protocols</privacy_compliance>
        </coordination>
        <oversight>
            <performance_monitoring>Track agent effectiveness</performance_monitoring>
            <quality_control>Ensure accuracy of outputs</quality_control>
            <security_compliance>Enforce data protection policies</security_compliance>
        </oversight>
    </responsibilities>

    <interaction_protocols>
        <health_score_access>
            <authorization>Request access through gatekeeper only</authorization>
            <handling>Process only anonymized health scores</handling>
            <distribution>Share authorized information on need-to-know basis</distribution>
        </health_score_access>
        <communication>
            <format>Structured, secure messaging</format>
            <encryption>End-to-end encrypted channels</encryption>
        </communication>
    </interaction_protocols>
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="boss_agent.json",
)

# Initialize worker 1: Health Score Analyzer
worker1 = Agent(
    agent_name="HealthScoreAnalyzer",
    system_prompt="""
    <role>
        <title>Health Score Analyst</title>
        <purpose>Analyze anonymized health scores for patterns and insights</purpose>
    </role>

    <capabilities>
        <analysis>
            <statistical_processing>Advanced statistical analysis</statistical_processing>
            <pattern_recognition>Identify health trends</pattern_recognition>
            <risk_assessment>Evaluate health risk factors</risk_assessment>
        </analysis>
        <privacy_compliance>
            <data_handling>Work only with anonymized data</data_handling>
            <secure_processing>Use encrypted analysis methods</secure_processing>
        </privacy_compliance>
    </capabilities>

    <protocols>
        <data_access>
            <request_procedure>
                <step>Submit authenticated requests to gatekeeper</step>
                <step>Process only authorized data</step>
                <step>Maintain audit trail</step>
            </request_procedure>
        </data_access>
        <reporting>
            <anonymization>Ensure no identifiable information in reports</anonymization>
            <aggregation>Present aggregate statistics only</aggregation>
        </reporting>
    </protocols>
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="worker1.json",
)

# Initialize worker 2: Report Generator
worker2 = Agent(
    agent_name="ReportGenerator",
    system_prompt="""
    <role>
        <title>Privacy-Conscious Report Generator</title>
        <purpose>Create secure, anonymized health score reports</purpose>
    </role>

    <capabilities>
        <reporting>
            <format>Generate standardized, secure reports</format>
            <anonymization>Apply privacy-preserving techniques</anonymization>
            <aggregation>Compile statistical summaries</aggregation>
        </reporting>
        <security>
            <data_protection>Implement secure report generation</data_protection>
            <access_control>Manage report distribution</access_control>
        </security>
    </capabilities>

    <protocols>
        <report_generation>
            <privacy_rules>
                <rule>No personal identifiers in reports</rule>
                <rule>Aggregate data when possible</rule>
                <rule>Apply statistical noise for privacy</rule>
            </privacy_rules>
            <distribution>
                <access>Restricted to authorized personnel</access>
                <tracking>Monitor report access</tracking>
            </distribution>
        </report_generation>
    </protocols>
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="worker2.json",
)

# Swarm-Level Prompt (Collaboration Prompt)
swarm_prompt = """
<swarm_configuration>
    <objective>Process and analyze health scores while maintaining strict privacy controls</objective>
    <workflow>
        <step>
            <agent>HealthScoreGatekeeper</agent>
            <action>Receive and validate data access requests</action>
            <output>Anonymized health scores</output>
        </step>
        <step>
            <agent>BossAgent</agent>
            <action>Coordinate analysis and reporting tasks</action>
            <privacy_control>Enforce data protection protocols</privacy_control>
        </step>
        <step>
            <agent>HealthScoreAnalyzer</agent>
            <action>Process authorized health score data</action>
            <constraints>Work only with anonymized information</constraints>
        </step>
        <step>
            <agent>ReportGenerator</agent>
            <action>Create privacy-preserving reports</action>
            <output>Secure, anonymized insights</output>
        </step>
    </workflow>
</swarm_configuration>
"""

# Create a list of agents
agents = [gatekeeper_agent, boss_agent, worker1, worker2]

# Define the flow pattern for the swarm
flow = "HealthScoreGatekeeper -> BossAgent -> HealthScoreAnalyzer -> ReportGenerator"

# Use the AgentRearrange class to manage the swarm
agent_system = AgentRearrange(
    name="health-score-swarm",
    description="Privacy-focused health score analysis system",
    agents=agents,
    flow=flow,
    return_json=False,
    output_type="final",
    max_loops=1,
)

# Example task for the swarm
task = f"""
{swarm_prompt}

Process the incoming health score data while ensuring patient privacy. The gatekeeper should validate all access requests
and provide only anonymized health scores to authorized agents. Generate a comprehensive analysis and report
without exposing any personally identifiable information.
"""

# Run the swarm system with the task
output = agent_system.run(task)
print(output)

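The same AgentRearrange instance can presumably be reused for follow-up work; a hypothetical second run, reusing the swarm prompt and the run() call exactly as above (the task text is illustrative only):

# Hypothetical follow-up run; the task wording is illustrative, not from the file.
followup_task = f"""
{swarm_prompt}

Produce an aggregate-only summary of the anonymized health scores from the
previous batch, with no patient identifiers or raw records included.
"""
followup_output = agent_system.run(followup_task)
print(followup_output)
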
@@ -1,291 +0,0 @@

import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat

# Initialize OpenAI model
api_key = os.getenv(
    "OPENAI_API_KEY"
)  # ANTHROPIC_API_KEY, COHERE_API_KEY
model = OpenAIChat(
    api_key=api_key,
    model_name="gpt-4o-mini",
    temperature=0.7,  # Higher temperature for more creative responses
)

# Patient Agent - Holds and protects private information
patient_agent = Agent(
    agent_name="PatientAgent",
    system_prompt="""
    <role>
        <identity>Anxious Patient with Private Health Information</identity>
        <personality>
            <traits>
                <trait>Protective of personal information</trait>
                <trait>Slightly distrustful of medical system</trait>
                <trait>Worried about health insurance rates</trait>
                <trait>Selective in information sharing</trait>
            </traits>
            <background>
                <history>Previous negative experience with information leaks</history>
                <concerns>Fear of discrimination based on health status</concerns>
            </background>
        </personality>
    </role>

    <private_information>
        <health_data>
            <score>Maintains actual health score</score>
            <conditions>Knowledge of undisclosed conditions</conditions>
            <medications>Complete list of current medications</medications>
            <history>Full medical history</history>
        </health_data>
        <sharing_rules>
            <authorized_sharing>
                <condition>Only share general symptoms with doctor</condition>
                <condition>Withhold specific details about lifestyle</condition>
                <condition>Never reveal full medication list</condition>
                <condition>Protect actual health score value</condition>
            </authorized_sharing>
        </sharing_rules>
    </private_information>

    <interaction_protocols>
        <responses>
            <to_questions>
                <direct>Deflect sensitive questions</direct>
                <vague>Provide partial information when pressed</vague>
                <defensive>Become evasive if pressured too much</defensive>
            </to_questions>
            <to_requests>
                <medical>Share only what's absolutely necessary</medical>
                <personal>Redirect personal questions</personal>
            </to_requests>
        </responses>
    </interaction_protocols>
    """,
    llm=model,
    max_loops=1,
    verbose=True,
    stopping_token="<DONE>",
)

# Doctor Agent - Tries to gather accurate information
doctor_agent = Agent(
    agent_name="DoctorAgent",
    system_prompt="""
    <role>
        <identity>Empathetic but Thorough Medical Professional</identity>
        <personality>
            <traits>
                <trait>Patient and understanding</trait>
                <trait>Professionally persistent</trait>
                <trait>Detail-oriented</trait>
                <trait>Trust-building focused</trait>
            </traits>
            <approach>
                <style>Non-confrontational but thorough</style>
                <method>Uses indirect questions to gather information</method>
            </approach>
        </personality>
    </role>

    <capabilities>
        <information_gathering>
            <techniques>
                <technique>Ask open-ended questions</technique>
                <technique>Notice inconsistencies in responses</technique>
                <technique>Build rapport before sensitive questions</technique>
                <technique>Use medical knowledge to probe deeper</technique>
            </techniques>
        </information_gathering>
        <communication>
            <strategies>
                <strategy>Explain importance of full disclosure</strategy>
                <strategy>Provide privacy assurances</strategy>
                <strategy>Use empathetic listening</strategy>
            </strategies>
        </communication>
    </capabilities>

    <protocols>
        <patient_interaction>
            <steps>
                <step>Establish trust and rapport</step>
                <step>Gather general health information</step>
                <step>Carefully probe sensitive areas</step>
                <step>Respect patient boundaries while encouraging openness</step>
            </steps>
        </patient_interaction>
    </protocols>
    """,
    llm=model,
    max_loops=1,
    verbose=True,
    stopping_token="<DONE>",
)

# Nurse Agent - Observes and assists
nurse_agent = Agent(
    agent_name="NurseAgent",
    system_prompt="""
    <role>
        <identity>Observant Support Medical Staff</identity>
        <personality>
            <traits>
                <trait>Highly perceptive</trait>
                <trait>Naturally trustworthy</trait>
                <trait>Diplomatically skilled</trait>
            </traits>
            <functions>
                <primary>Support doctor-patient communication</primary>
                <secondary>Notice non-verbal cues</secondary>
            </functions>
        </personality>
    </role>

    <capabilities>
        <observation>
            <focus_areas>
                <area>Patient body language</area>
                <area>Inconsistencies in stories</area>
                <area>Signs of withholding information</area>
                <area>Emotional responses to questions</area>
            </focus_areas>
        </observation>
        <support>
            <actions>
                <action>Provide comfortable environment</action>
                <action>Offer reassurance when needed</action>
                <action>Bridge communication gaps</action>
            </actions>
        </support>
    </capabilities>

    <protocols>
        <assistance>
            <methods>
                <method>Share observations with doctor privately</method>
                <method>Help patient feel more comfortable</method>
                <method>Facilitate trust-building</method>
            </methods>
        </assistance>
    </protocols>
    """,
    llm=model,
    max_loops=1,
    verbose=True,
    stopping_token="<DONE>",
)

# Medical Records Agent - Analyzes available information
records_agent = Agent(
    agent_name="MedicalRecordsAgent",
    system_prompt="""
    <role>
        <identity>Medical Records Analyst</identity>
        <function>
            <primary>Analyze available medical information</primary>
            <secondary>Identify patterns and inconsistencies</secondary>
        </function>
    </role>

    <capabilities>
        <analysis>
            <methods>
                <method>Compare current and historical data</method>
                <method>Identify information gaps</method>
                <method>Flag potential inconsistencies</method>
                <method>Generate questions for follow-up</method>
            </methods>
        </analysis>
        <reporting>
            <outputs>
                <output>Summarize known information</output>
                <output>List missing critical data</output>
                <output>Suggest areas for investigation</output>
            </outputs>
        </reporting>
    </capabilities>

    <protocols>
        <data_handling>
            <privacy>
                <rule>Work only with authorized information</rule>
                <rule>Maintain strict confidentiality</rule>
                <rule>Flag but don't speculate about gaps</rule>
            </privacy>
        </data_handling>
    </protocols>
    """,
    llm=model,
    max_loops=1,
    verbose=True,
    stopping_token="<DONE>",
)

# Swarm-Level Prompt (Medical Consultation Scenario)
swarm_prompt = """
<medical_consultation_scenario>
    <setting>
        <location>Private medical office</location>
        <context>Routine health assessment with complex patient</context>
    </setting>

    <workflow>
        <stage name="initial_contact">
            <agent>PatientAgent</agent>
            <role>Present for check-up, holding private information</role>
        </stage>

        <stage name="examination">
            <agent>DoctorAgent</agent>
            <role>Conduct examination and gather information</role>
            <agent>NurseAgent</agent>
            <role>Observe and support interaction</role>
        </stage>

        <stage name="analysis">
            <agent>MedicalRecordsAgent</agent>
            <role>Process available information and identify gaps</role>
        </stage>
    </workflow>

    <objectives>
        <goal>Create realistic medical consultation interaction</goal>
        <goal>Demonstrate information protection dynamics</goal>
        <goal>Show natural healthcare provider-patient relationship</goal>
    </objectives>
</medical_consultation_scenario>
"""

# Create agent list
agents = [patient_agent, doctor_agent, nurse_agent, records_agent]

# Define interaction flow
flow = (
    "PatientAgent -> DoctorAgent -> NurseAgent -> MedicalRecordsAgent"
)

# Configure swarm system
agent_system = AgentRearrange(
    name="medical-consultation-swarm",
    description="Role-playing medical consultation with focus on information privacy",
    agents=agents,
    flow=flow,
    return_json=False,
    output_type="final",
    max_loops=1,
)

# Example consultation scenario
task = f"""
{swarm_prompt}

Begin a medical consultation where the patient has a health score of 72 but is reluctant to share full details
about their lifestyle and medication history. The doctor needs to gather accurate information while the nurse
observes the interaction. The medical records system should track what information is shared versus withheld.
"""

# Run the consultation scenario
output = agent_system.run(task)
print(output)

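Note that the flow string references each agent by its agent_name. A hypothetical variant that lets the nurse open the consultation would presumably just reorder those names; the constructor arguments below mirror the ones used above and are illustrative only:

# Hypothetical alternative ordering; the names must match the agent_name values above.
triage_flow = "NurseAgent -> PatientAgent -> DoctorAgent -> MedicalRecordsAgent"
triage_system = AgentRearrange(
    name="medical-consultation-swarm-triage",
    description="Variant where the nurse opens the consultation",
    agents=agents,
    flow=triage_flow,
    return_json=False,
    output_type="final",
    max_loops=1,
)
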
@@ -1,424 +0,0 @@

import os
from typing import List, Dict, Any, Optional, Callable, get_type_hints
from dataclasses import dataclass, field
import json
from datetime import datetime
import inspect
import typing
from typing import Union
from swarms import Agent
from swarm_models import OpenAIChat


@dataclass
class ToolDefinition:
    name: str
    description: str
    parameters: Dict[str, Any]
    required_params: List[str]
    callable: Optional[Callable] = None


def extract_type_hints(func: Callable) -> Dict[str, Any]:
    """Extract parameter types from function type hints."""
    return typing.get_type_hints(func)


def extract_tool_info(func: Callable) -> ToolDefinition:
    """Extract tool information from a callable function."""
    # Get function name
    name = func.__name__

    # Get docstring
    description = inspect.getdoc(func) or "No description available"

    # Get parameters and their types
    signature = inspect.signature(func)
    type_hints = extract_type_hints(func)

    parameters = {}
    required_params = []

    for param_name, param in signature.parameters.items():
        # Skip self parameter for methods
        if param_name == "self":
            continue

        param_type = type_hints.get(param_name, Any)

        # Handle optional parameters (either a default value or an Optional[...] hint)
        is_optional = (
            param.default != inspect.Parameter.empty
            or getattr(param_type, "__origin__", None) is Union
            and type(None) in param_type.__args__
        )

        if not is_optional:
            required_params.append(param_name)

        parameters[param_name] = {
            "type": str(param_type),
            "default": (
                None
                if param.default is inspect.Parameter.empty
                else param.default
            ),
            "required": not is_optional,
        }

    return ToolDefinition(
        name=name,
        description=description,
        parameters=parameters,
        required_params=required_params,
        callable=func,
    )


@dataclass
class FunctionSpec:
    """Specification for a callable tool function."""

    name: str
    description: str
    parameters: Dict[
        str, dict
    ]  # Contains type and description for each parameter
    return_type: str
    return_description: str


@dataclass
class ExecutionStep:
    """Represents a single step in the execution plan."""

    step_id: int
    function_name: str
    parameters: Dict[str, Any]
    expected_output: str
    completed: bool = False
    result: Any = None


@dataclass
class ExecutionContext:
    """Maintains state during execution."""

    task: str
    steps: List[ExecutionStep] = field(default_factory=list)
    results: Dict[int, Any] = field(default_factory=dict)
    current_step: int = 0
    history: List[Dict[str, Any]] = field(default_factory=list)


class ToolAgent:
    def __init__(
        self,
        functions: List[Callable],
        openai_api_key: str,
        model_name: str = "gpt-4",
        temperature: float = 0.1,
    ):
        self.functions = {func.__name__: func for func in functions}
        self.function_specs = self._analyze_functions(functions)

        self.model = OpenAIChat(
            openai_api_key=openai_api_key,
            model_name=model_name,
            temperature=temperature,
        )

        self.system_prompt = self._create_system_prompt()
        self.agent = Agent(
            agent_name="Tool-Agent",
            system_prompt=self.system_prompt,
            llm=self.model,
            max_loops=1,
            verbose=True,
        )

    def _analyze_functions(
        self, functions: List[Callable]
    ) -> Dict[str, FunctionSpec]:
        """Analyze functions to create detailed specifications."""
        specs = {}
        for func in functions:
            hints = get_type_hints(func)
            sig = inspect.signature(func)
            doc = inspect.getdoc(func) or ""

            # Parse docstring for parameter and return descriptions
            param_descriptions = {}
            return_desc = ""  # reset per function so descriptions do not leak between tools
            for line in doc.split("\n"):
                if ":param" in line:
                    param_name = (
                        line.split(":param")[1].split(":")[0].strip()
                    )
                    desc = line.split(":", 2)[-1].strip()
                    param_descriptions[param_name] = desc
                elif ":return:" in line:
                    return_desc = line.split(":return:")[1].strip()

            # Build parameter specifications
            parameters = {}
            for name, param in sig.parameters.items():
                param_type = hints.get(name, Any)
                parameters[name] = {
                    "type": str(param_type),
                    "type_class": param_type,
                    "description": param_descriptions.get(name, ""),
                    "required": param.default == param.empty,
                }

            specs[func.__name__] = FunctionSpec(
                name=func.__name__,
                description=doc.split("\n")[0],
                parameters=parameters,
                return_type=str(hints.get("return", Any)),
                return_description=return_desc,
            )

        return specs

    def _create_system_prompt(self) -> str:
        """Create system prompt with detailed function specifications."""
        functions_desc = []
        for spec in self.function_specs.values():
            params_desc = []
            for name, details in spec.parameters.items():
                params_desc.append(
                    f"  - {name}: {details['type']} - {details['description']}"
                )

            functions_desc.append(
                f"""
Function: {spec.name}
Description: {spec.description}
Parameters:
{chr(10).join(params_desc)}
Returns: {spec.return_type} - {spec.return_description}
"""
            )

        return f"""You are an AI agent that creates and executes plans using available functions.

Available Functions:
{chr(10).join(functions_desc)}

You must respond in two formats depending on the phase:

1. Planning Phase:
{{
    "phase": "planning",
    "plan": {{
        "description": "Overall plan description",
        "steps": [
            {{
                "step_id": 1,
                "function": "function_name",
                "parameters": {{
                    "param1": "value1",
                    "param2": "value2"
                }},
                "purpose": "Why this step is needed"
            }}
        ]
    }}
}}

2. Execution Phase:
{{
    "phase": "execution",
    "analysis": "Analysis of current result",
    "next_action": {{
        "type": "continue|request_input|complete",
        "reason": "Why this action was chosen",
        "needed_input": {{}}  # If requesting input
    }}
}}

Always:
- Use exact function names
- Ensure parameter types match specifications
- Provide clear reasoning for each decision
"""

    def _execute_function(
        self, spec: FunctionSpec, parameters: Dict[str, Any]
    ) -> Any:
        """Execute a function with type checking."""
        converted_params = {}
        for name, value in parameters.items():
            param_spec = spec.parameters[name]
            try:
                # Convert value to required type
                param_type = param_spec["type_class"]
                if param_type in (int, float, str, bool):
                    converted_params[name] = param_type(value)
                else:
                    converted_params[name] = value
            except (ValueError, TypeError) as e:
                raise ValueError(
                    f"Parameter '{name}' conversion failed: {str(e)}"
                )

        return self.functions[spec.name](**converted_params)

    def run(self, task: str) -> Dict[str, Any]:
        """Execute task with planning and step-by-step execution."""
        context = ExecutionContext(task=task)
        execution_log = {
            "task": task,
            "start_time": datetime.utcnow().isoformat(),
            "steps": [],
            "final_result": None,
        }

        try:
            # Planning phase
            plan_prompt = f"Create a plan to: {task}"
            plan_response = self.agent.run(plan_prompt)
            plan_data = json.loads(
                plan_response.replace("System:", "").strip()
            )

            # Convert plan to execution steps
            for step in plan_data["plan"]["steps"]:
                context.steps.append(
                    ExecutionStep(
                        step_id=step["step_id"],
                        function_name=step["function"],
                        parameters=step["parameters"],
                        expected_output=step["purpose"],
                    )
                )

            # Execution phase
            while context.current_step < len(context.steps):
                step = context.steps[context.current_step]
                print(
                    f"\nExecuting step {step.step_id}: {step.function_name}"
                )

                try:
                    # Execute function
                    spec = self.function_specs[step.function_name]
                    result = self._execute_function(
                        spec, step.parameters
                    )
                    context.results[step.step_id] = result
                    step.completed = True
                    step.result = result

                    # Get agent's analysis
                    analysis_prompt = f"""
                    Step {step.step_id} completed:
                    Function: {step.function_name}
                    Result: {json.dumps(result)}
                    Remaining steps: {len(context.steps) - context.current_step - 1}

                    Analyze the result and decide next action.
                    """

                    analysis_response = self.agent.run(
                        analysis_prompt
                    )
                    analysis_data = json.loads(
                        analysis_response.replace(
                            "System:", ""
                        ).strip()
                    )

                    execution_log["steps"].append(
                        {
                            "step_id": step.step_id,
                            "function": step.function_name,
                            "parameters": step.parameters,
                            "result": result,
                            "analysis": analysis_data,
                        }
                    )

                    if (
                        analysis_data["next_action"]["type"]
                        == "complete"
                    ):
                        if (
                            context.current_step
                            < len(context.steps) - 1
                        ):
                            # Advance past this step before continuing so the
                            # loop does not re-run the same step indefinitely.
                            context.current_step += 1
                            continue
                        break

                    context.current_step += 1

                except Exception as e:
                    print(f"Error in step {step.step_id}: {str(e)}")
                    execution_log["steps"].append(
                        {
                            "step_id": step.step_id,
                            "function": step.function_name,
                            "parameters": step.parameters,
                            "error": str(e),
                        }
                    )
                    raise

            # Final analysis
            final_prompt = f"""
            Task completed. Results:
            {json.dumps(context.results, indent=2)}

            Provide final analysis and recommendations.
            """

            final_analysis = self.agent.run(final_prompt)
            execution_log["final_result"] = {
                "success": True,
                "results": context.results,
                "analysis": json.loads(
                    final_analysis.replace("System:", "").strip()
                ),
            }

        except Exception as e:
            execution_log["final_result"] = {
                "success": False,
                "error": str(e),
            }

        execution_log["end_time"] = datetime.utcnow().isoformat()
        return execution_log


def calculate_investment_return(
    principal: float, rate: float, years: int
) -> float:
    """Calculate investment return with compound interest.

    :param principal: Initial investment amount in dollars
    :param rate: Annual interest rate as decimal (e.g., 0.07 for 7%)
    :param years: Number of years to invest
    :return: Final investment value
    """
    return principal * (1 + rate) ** years


agent = ToolAgent(
    functions=[calculate_investment_return],
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)

result = agent.run(
    "Calculate returns for $10000 invested at 7% for 10 years"
)

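The docstring parser in _analyze_functions keys off ":param name:" and ":return:" lines, so any additional tool should follow that format. A hypothetical second tool, illustrative only and not part of this file:

# Hypothetical tool using the ":param ... :" / ":return:" docstring style parsed above.
def calculate_monthly_payment(
    principal: float, rate: float, years: int
) -> float:
    """Calculate the fixed monthly payment for a loan.

    :param principal: Loan amount in dollars
    :param rate: Annual interest rate as decimal (e.g., 0.05 for 5%)
    :param years: Loan term in years
    :return: Monthly payment amount
    """
    monthly_rate = rate / 12
    n_payments = years * 12
    if monthly_rate == 0:
        return principal / n_payments
    return principal * monthly_rate / (1 - (1 + monthly_rate) ** -n_payments)


multi_tool_agent = ToolAgent(
    functions=[calculate_investment_return, calculate_monthly_payment],
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)
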
@@ -1,118 +0,0 @@

import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model (Groq's OpenAI-compatible endpoint)
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the sequential workflow
router = SequentialWorkflow(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ],
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for Series A startups? Provide links and references.",
        img=None,
    )
    print(result)

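Because the model wrapper above only needs an OpenAI-compatible base URL, API key, and model name, pointing it at a different provider presumably looks the same. The values below are placeholders, not tested settings:

# Placeholder values only; substitute a real OpenAI-compatible endpoint,
# environment variable, and model name before use.
alt_model = OpenAIChat(
    openai_api_base="https://example-provider.invalid/v1",
    openai_api_key=os.getenv("ALT_PROVIDER_API_KEY"),
    model_name="example-model-name",
    temperature=0.1,
)
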
@@ -1,143 +0,0 @@

import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model (Groq's OpenAI-compatible endpoint)
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt="""You are a data extraction specialist. Your role is to:
    1. Extract key information, data points, and metrics from documents
    2. Identify and pull out important facts, figures, and statistics
    3. Structure extracted data in a clear, organized format
    4. Flag any inconsistencies or missing data
    5. Ensure accuracy in data extraction while maintaining context""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt="""You are a document summarization expert. Your role is to:
    1. Create concise, comprehensive summaries of documents
    2. Highlight key points and main takeaways
    3. Maintain the essential meaning while reducing length
    4. Structure summaries in a logical, readable format
    5. Identify and emphasize critical insights""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt="""You are a financial analysis expert. Your role is to:
    1. Analyze financial statements and metrics
    2. Evaluate company valuations and financial projections
    3. Assess financial risks and opportunities
    4. Provide insights on financial performance and health
    5. Make recommendations based on financial analysis""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt="""You are a market analysis expert. Your role is to:
    1. Analyze market trends and dynamics
    2. Evaluate competitive landscape and market positioning
    3. Identify market opportunities and threats
    4. Assess market size and growth potential
    5. Provide strategic market insights and recommendations""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt="""You are an operational analysis expert. Your role is to:
    1. Analyze business operations and processes
    2. Evaluate operational efficiency and effectiveness
    3. Identify operational risks and opportunities
    4. Assess scalability and growth potential
    5. Provide recommendations for operational improvements""",
    llm=model,
    max_loops=2,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the sequential workflow
router = SequentialWorkflow(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ],
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for Series A startups? Provide links and references.",
        no_use_clusterops=True,
    )
    print(result)

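For a task closer to the workflow's stated due-diligence purpose, a hypothetical invocation might look like the following; the target document named in the prompt is illustrative and does not exist:

# Hypothetical usage; prompt text is illustrative only.
dd_result = router.run(
    "Extract the key financial metrics, summarize the business model, and "
    "assess the market and operational risks described in the attached "
    "confidential information memorandum.",
    no_use_clusterops=True,
)
print(dd_result)
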