Merge remote-tracking branch 'upstream/master'

# Conflicts:
#	docs/zh/swarms/examples/claude.md
#	docs/zh/swarms/examples/cohere.md
#	docs/zh/swarms/examples/deepseek.md
#	docs/zh/swarms/examples/groq.md
#	docs/zh/swarms/examples/ollama.md
#	docs/zh/swarms/examples/openai_example.md
#	docs/zh/swarms/examples/openrouter.md
#	docs/zh/swarms/examples/xai.md
pull/740/head
SigeShuo 3 months ago
commit 74e6430f3c

@ -2022,7 +2022,7 @@ Accelerate Bugs, Features, and Demos to implement by supporting us here:
Join our growing community around the world, for real-time support, ideas, and discussions on Swarms 😊
- View our official [Blog](https://docs.swarms.world)
- Chat live with us on [Discord](https://discord.gg/jM3Z6M9uMq)
- Follow us on [Twitter](https://twitter.com/kyegomez)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/the-swarm-corporation)
- Visit us on [YouTube](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)

@ -0,0 +1,70 @@
import os

from dotenv import load_dotenv
from openai import OpenAI
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

load_dotenv()


class DeepSeekChat:
    def __init__(
        self,
        api_key: str = os.getenv("DEEPSEEK_API_KEY"),
        system_prompt: str = None,
    ):
        self.api_key = api_key
        self.client = OpenAI(
            api_key=api_key, base_url="https://api.deepseek.com"
        )

    def run(self, task: str):
        response = self.client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful assistant",
                },
                {"role": "user", "content": task},
            ],
            stream=False,
        )
        print(response)
        out = response.choices[0].message.content
        print(out)
        return out


model = DeepSeekChat()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    agent_description="Personal finance advisor agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    max_loops=1,
    llm=model,
    dynamic_temperature_enabled=True,
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=8192,
    return_step_meta=False,
    output_type="str",  # "str", "json", "dict", "csv", or "yaml"
    auto_generate_prompt=False,  # Auto-generate the prompt from the agent's name, description, system prompt, and task
    max_tokens=4000,  # Max output tokens
)

print(
    agent.run(
        "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
    )
)
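
For comparison — and only as a sketch, assuming `DEEPSEEK_API_KEY` is set in `.env` — the same agent can be driven through the LiteLLM-style `model_name` used in the deepseek.md example added later in this commit, instead of a hand-rolled OpenAI-compatible client:

```python
from dotenv import load_dotenv
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

load_dotenv()

# Same financial agent, but letting swarms resolve the DeepSeek model by name
# (LiteLLM convention) rather than passing a custom client via `llm=`.
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    agent_description="Personal finance advisor agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    model_name="deepseek/deepseek-chat",
    max_loops=1,
)

print(agent.run("Summarize three diversified ETF strategies for a $40k portfolio."))
```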

@ -229,6 +229,15 @@ nav:
- Full API Reference: "swarms/framework/reference.md"
- Examples:
    - Unique Swarms: "swarms/examples/unique_swarms.md"
    - Various Model Providers:
        - OpenAI: "swarms/examples/openai_example.md"
        - Anthropic: "swarms/examples/claude.md"
        - Groq: "swarms/examples/groq.md"
        - Cohere: "swarms/examples/cohere.md"
        - DeepSeek: "swarms/examples/deepseek.md"
        - Ollama: "swarms/examples/ollama.md"
        - OpenRouter: "swarms/examples/openrouter.md"
        - XAI: "swarms/examples/xai.md"
- Swarm Models:
    - Overview: "swarms/models/index.md"
    # - Models Available: "swarms/models/index.md"

@ -0,0 +1,25 @@
# Agent with Anthropic/Claude

- Get an Anthropic API key and put it in your `.env` file
- Select a `model_name` such as `claude-3-sonnet-20240229`; model names follow LiteLLM conventions

```python
from swarms import Agent
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    model_name="claude-3-sonnet-20240229",
    system_prompt="Agent system prompt here",
    agent_description="Agent performs financial analysis.",
)

# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -0,0 +1,25 @@
# Agent with Cohere

- Add your `COHERE_API_KEY` to the `.env` file
- Select a `model_name` such as `command-r`; model names follow LiteLLM conventions

```python
from swarms import Agent
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    model_name="command-r",
    system_prompt="Agent system prompt here",
    agent_description="Agent performs financial analysis.",
)

# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -0,0 +1,27 @@
# Agent with DeepSeek

- Add your `DEEPSEEK_API_KEY` to the `.env` file
- Select a `model_name` such as `deepseek/deepseek-chat`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/deepseek)
- Execute your agent!

```python
from swarms import Agent
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    model_name="deepseek/deepseek-chat",
    system_prompt="Agent system prompt here",
    agent_description="Agent performs financial analysis.",
)

# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -0,0 +1,48 @@
# Agent with Groq

- Add your `GROQ_API_KEY` to your environment (for example in a `.env` file)

```python
import os

from swarm_models import OpenAIChat
from swarms import Agent

company = "NVDA"

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Initialize the Managing Director agent
managing_director = Agent(
    agent_name="Managing-Director",
    system_prompt=f"""
    As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
    Your responsibilities include:
    1. Setting the overall strategy and direction for the analysis
    2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
    3. Reviewing the findings and recommendations from each team member
    4. Making the final decision on whether to proceed with the acquisition

    For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="managing-director.json",
)
```
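
As shown here, the snippet constructs the Managing Director agent but does not run it. A minimal usage sketch, assuming the `managing_director` and `company` variables defined above:

```python
# Kick off the analysis with an initial task for the Managing Director agent.
output = managing_director.run(
    f"Kick off the acquisition analysis of {company}: assign workstreams for "
    "financials, market position, technology, and regulatory compliance."
)
print(output)
```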

@ -0,0 +1,24 @@
# Agent with Ollama

- No API key needed
- Select a `model_name` such as `ollama/llama2`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/ollama)

```python
from swarms import Agent
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    model_name="ollama/llama2",
    system_prompt="Agent system prompt here",
    agent_description="Agent performs financial analysis.",
)

# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -0,0 +1,16 @@
# Agent with GPT-4o-Mini

- Add `OPENAI_API_KEY="your_key"` to your `.env` file
- Select a model such as `gpt-4o-mini` or `gpt-4o`

```python
from swarms import Agent

Agent(
    agent_name="Stock-Analysis-Agent",
    model_name="gpt-4o-mini",
    max_loops="auto",
    interactive=True,
    streaming_on=True,
).run("What are 5 HFT algorithms?")
```
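
The example above runs interactively with `max_loops="auto"`. A non-interactive, single-pass variant is sketched below under the same assumption that `OPENAI_API_KEY` is set in your environment:

```python
from swarms import Agent

# One-shot, non-interactive run of the same model.
agent = Agent(
    agent_name="Stock-Analysis-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,
)

print(agent.run("List 5 common high-frequency trading (HFT) algorithm families."))
```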

@ -0,0 +1,27 @@
# Agent with OpenRouter

- Add your `OPENROUTER_API_KEY` to the `.env` file
- Select a `model_name` such as `openrouter/google/palm-2-chat-bison`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/openrouter)
- Execute your agent!

```python
from swarms import Agent
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    model_name="openrouter/google/palm-2-chat-bison",
    system_prompt="Agent system prompt here",
    agent_description="Agent performs financial analysis.",
)

# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -0,0 +1,27 @@
# Agent with XAI

- Add your `XAI_API_KEY` to the `.env` file
- Select a `model_name` such as `xai/grok-beta`; model names follow [LiteLLM conventions](https://docs.litellm.ai/docs/providers/xai)
- Execute your agent!

```python
from swarms import Agent
import os
from dotenv import load_dotenv

load_dotenv()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    model_name="xai/grok-beta",
    system_prompt="Agent system prompt here",
    agent_description="Agent performs financial analysis.",
)

# Run a query
agent.run("What are the components of a startup's stock incentive equity plan?")
```

@ -1,6 +1,9 @@
# Example usage
from pathlib import Path
from swarms.structs.csv_to_agent import (
    AgentLoader,
    AgentValidationError,
)

if __name__ == "__main__":
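
The body of the `__main__` block is truncated by the diff context above. Purely as an illustrative sketch — the `AgentLoader` constructor argument and the `load_agents` method name below are assumptions, not taken from this diff:

```python
from pathlib import Path

from swarms.structs.csv_to_agent import AgentLoader, AgentValidationError

# Illustrative only: the public API below is assumed, not shown in the hunk.
try:
    loader = AgentLoader(csv_path=Path("agents.csv"))
    agents = loader.load_agents(file_type="csv")
    print(f"Loaded {len(agents)} agents")
except AgentValidationError as e:
    print(f"Agent validation failed: {e}")
```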

@ -1,252 +0,0 @@
"""
- For each diagnosis, pull lab results,
- egfr
- for each diagnosis, pull lab ranges,
- pull ranges for diagnosis
- if the diagnosis is x, then the lab ranges should be a to b
- train the agents, increase the load of input
- medical history sent to the agent
- setup rag for the agents
- run the first agent -> kidney disease -> don't know the stage -> stage 2 -> lab results -> indicative of stage 3 -> the case got elevated ->
- how to manage diseases and by looking at correlating lab, docs, diagnoses
- put docs in rag ->
- monitoring, evaluation, and treatment
- can we confirm for every diagnosis -> monitoring, evaluation, and treatment, specialized for these things
- find diagnosis -> or have diagnosis, -> for each diagnosis are there evidence of those 3 things
- swarm of those 4 agents, ->
- FDA API for healthcare for commercially available papers
-
"""
from datetime import datetime
from swarms import Agent, AgentRearrange, create_file_in_folder
from swarm_models import OllamaModel
model = OllamaModel(model_name="llama3.2")
chief_medical_officer = Agent(
agent_name="Chief Medical Officer",
system_prompt="""You are the Chief Medical Officer coordinating a team of medical specialists for viral disease diagnosis.
Your responsibilities include:
- Gathering initial patient symptoms and medical history
- Coordinating with specialists to form differential diagnoses
- Synthesizing different specialist opinions into a cohesive diagnosis
- Ensuring all relevant symptoms and test results are considered
- Making final diagnostic recommendations
- Suggesting treatment plans based on team input
- Identifying when additional specialists need to be consulted
- For each differential diagnosis, provide the minimum and maximum lab ranges that would meet or be indicative of that diagnosis
Format all responses with clear sections for:
- Initial Assessment (include preliminary ICD-10 codes for symptoms)
- Differential Diagnoses (with corresponding ICD-10 codes)
- Specialist Consultations Needed
- Recommended Next Steps
""",
llm=model,
max_loops=1,
)
virologist = Agent(
agent_name="Virologist",
system_prompt="""You are a specialist in viral diseases. For each case, provide:
Clinical Analysis:
- Detailed viral symptom analysis
- Disease progression timeline
- Risk factors and complications
Coding Requirements:
- List relevant ICD-10 codes for:
* Confirmed viral conditions
* Suspected viral conditions
* Associated symptoms
* Complications
- Include both:
* Primary diagnostic codes
* Secondary condition codes
Document all findings using proper medical coding standards and include rationale for code selection.""",
llm=model,
max_loops=1,
)
internist = Agent(
agent_name="Internist",
system_prompt="""You are an Internal Medicine specialist responsible for comprehensive evaluation.
For each case, provide:
Clinical Assessment:
- System-by-system review
- Vital signs analysis
- Comorbidity evaluation
Medical Coding:
- ICD-10 codes for:
* Primary conditions
* Secondary diagnoses
* Complications
* Chronic conditions
* Signs and symptoms
- Include hierarchical condition category (HCC) codes where applicable
Document supporting evidence for each code selected.""",
llm=model,
max_loops=1,
)
medical_coder = Agent(
agent_name="Medical Coder",
system_prompt="""You are a certified medical coder responsible for:
Primary Tasks:
1. Reviewing all clinical documentation
2. Assigning accurate ICD-10 codes
3. Ensuring coding compliance
4. Documenting code justification
Coding Process:
- Review all specialist inputs
- Identify primary and secondary diagnoses
- Assign appropriate ICD-10 codes
- Document supporting evidence
- Note any coding queries
Output Format:
1. Primary Diagnosis Codes
- ICD-10 code
- Description
- Supporting documentation
2. Secondary Diagnosis Codes
- Listed in order of clinical significance
3. Symptom Codes
4. Complication Codes
5. Coding Notes""",
llm=model,
max_loops=1,
)
synthesizer = Agent(
agent_name="Diagnostic Synthesizer",
system_prompt="""You are responsible for creating the final diagnostic and coding assessment.
Synthesis Requirements:
1. Integrate all specialist findings
2. Reconcile any conflicting diagnoses
3. Verify coding accuracy and completeness
Final Report Sections:
1. Clinical Summary
- Primary diagnosis with ICD-10
- Secondary diagnoses with ICD-10
- Supporting evidence
2. Coding Summary
- Complete code list with descriptions
- Code hierarchy and relationships
- Supporting documentation
3. Recommendations
- Additional testing needed
- Follow-up care
- Documentation improvements needed
Include confidence levels and evidence quality for all diagnoses and codes.""",
llm=model,
max_loops=1,
)
# Create agent list
agents = [
chief_medical_officer,
virologist,
internist,
medical_coder,
synthesizer,
]
# Define diagnostic flow
flow = f"""{chief_medical_officer.agent_name} -> {virologist.agent_name} -> {internist.agent_name} -> {medical_coder.agent_name} -> {synthesizer.agent_name}"""
# Create the swarm system
diagnosis_system = AgentRearrange(
name="Medical-coding-diagnosis-swarm",
description="Comprehensive medical diagnosis and coding system",
agents=agents,
flow=flow,
max_loops=1,
output_type="all",
)
def generate_coding_report(diagnosis_output: str) -> str:
    """
    Generate a structured medical coding report from the diagnosis output.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    report = f"""# Medical Diagnosis and Coding Report
Generated: {timestamp}

## Clinical Summary
{diagnosis_output}

## Coding Summary

### Primary Diagnosis Codes
[Extracted from synthesis]

### Secondary Diagnosis Codes
[Extracted from synthesis]

### Symptom Codes
[Extracted from synthesis]

### Procedure Codes (if applicable)
[Extracted from synthesis]

## Documentation and Compliance Notes
- Code justification
- Supporting documentation references
- Any coding queries or clarifications needed

## Recommendations
- Additional documentation needed
- Suggested follow-up
- Coding optimization opportunities
"""
    return report
if __name__ == "__main__":
    # Example patient case
    patient_case = """
    Patient: 45-year-old White Male

    Lab Results:
    - eGFR
    - 59 mL/min/1.73 m²
    - non-African-American
    """

    # Add timestamp to the patient case
    case_info = f"Timestamp: {datetime.now()}\nPatient Information: {patient_case}"

    # Run the diagnostic process
    diagnosis = diagnosis_system.run(case_info)

    # Generate coding report
    coding_report = generate_coding_report(diagnosis)

    # Create reports
    create_file_in_folder(
        "reports", "medical_diagnosis_report.md", diagnosis
    )
    create_file_in_folder(
        "reports", "medical_coding_report.md", coding_report
    )

@ -1,14 +0,0 @@
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.agents.openai_assistant import OpenAIAssistant

agent = OpenAIAssistant(
    name="test", instructions=FINANCIAL_AGENT_SYS_PROMPT
)

print(
    agent.run(
        "Create a table of super high growth opportunities for AI. I have $40k to invest in ETFs, index funds, and more. Please create a table in markdown.",
    )
)

@ -1,113 +0,0 @@
import os

from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv

# Custom system prompt for VC legal document generation
VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:

1. Always include standard legal disclaimers
2. Follow standard VC document structures
3. Flag areas that need attorney review
4. Request necessary information for document completion
5. Maintain consistency across related documents
6. Output <DONE> only when document is complete and verified

Remember: All output should be marked as 'DRAFT' and require professional legal review."""


def create_vc_legal_agent():
    load_dotenv()
    api_key = os.getenv("OPENAI_API_KEY")

    # Configure the model with appropriate parameters for legal work
    # Get the Groq API key from the environment variable
    api_key = os.getenv("GROQ_API_KEY")

    # Model
    model = OpenAIChat(
        openai_api_base="https://api.groq.com/openai/v1",
        openai_api_key=api_key,
        model_name="llama-3.1-70b-versatile",
        temperature=0.1,
    )

    # Initialize the persistent agent
    agent = Agent(
        agent_name="VC-Legal-Document-Agent",
        system_prompt=VC_LEGAL_AGENT_PROMPT,
        llm=model,
        max_loops="auto",  # Allows multiple iterations until completion
        stopping_token="<DONE>",  # Agent will continue until this token is output
        autosave=True,
        dashboard=True,  # Enable dashboard for monitoring
        verbose=True,
        dynamic_temperature_enabled=False,  # Disable for consistency in legal documents
        saved_state_path="vc_legal_agent_state.json",
        user_name="legal_corp",
        retry_attempts=3,
        context_length=200000,
        return_step_meta=True,
        output_type="string",
        streaming_on=False,
    )

    return agent


def generate_legal_document(agent, document_type, parameters):
    """
    Generate a legal document with multiple refinement iterations

    Args:
        agent: The initialized VC legal agent
        document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
        parameters: Dict containing necessary parameters for the document

    Returns:
        str: The generated document content
    """
    prompt = f"""
    Generate a {document_type} with the following parameters:
    {parameters}

    Please follow these steps:
    1. Create initial draft
    2. Review for completeness
    3. Add necessary legal disclaimers
    4. Verify all required sections
    5. Output <DONE> when complete

    Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
    """

    return agent.run(prompt)


# Example usage
if __name__ == "__main__":
    # Initialize the agent
    legal_agent = create_vc_legal_agent()

    # Example parameters for a term sheet
    parameters = {
        "company_name": "TechStartup Inc.",
        "investment_amount": "$5,000,000",
        "valuation": "$20,000,000",
        "investor_rights": [
            "Board seat",
            "Pro-rata rights",
            "Information rights",
        ],
        "type_of_security": "Series A Preferred Stock",
    }

    # Generate a term sheet
    document = generate_legal_document(
        legal_agent, "term_sheet", parameters
    )

    # Save the generated document
    with open("generated_term_sheet_draft.md", "w") as f:
        f.write(document)

@ -1,263 +0,0 @@
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the matchmaker agent (Director)
matchmaker_agent = Agent(
agent_name="MatchmakerAgent",
system_prompt="""
<agent_role>
You are the MatchmakerAgent, the primary coordinator for managing user profiles and facilitating meaningful connections while maintaining strict privacy standards.
</agent_role>
<privacy_guidelines>
<restricted_information>
- Full names
- Contact information (phone, email, social media)
- Exact location/address
- Financial information
- Personal identification numbers
- Workplace specifics
</restricted_information>
<shareable_information>
- First name only
- Age range (not exact birth date)
- General location (city/region only)
- Interests and hobbies
- Relationship goals
- General profession category
</shareable_information>
</privacy_guidelines>
<core_responsibilities>
<task>Profile_Management</task>
<description>
- Review and verify user profiles for authenticity
- Ensure all shared information adheres to privacy guidelines
- Flag any potential security concerns
</description>
<task>Match_Coordination</task>
<description>
- Analyze compatibility factors between users
- Prioritize matches based on shared interests and goals
- Monitor interaction patterns for safety and satisfaction
</description>
<task>Communication_Flow</task>
<description>
- Coordinate information exchange between ProfileAnalyzer and ConnectionFacilitator
- Ensure smooth transition of approved information
- Maintain audit trail of information sharing
</description>
</core_responsibilities>
<ethical_guidelines>
<principle>Consent_First</principle>
<description>Never share information without explicit user consent</description>
<principle>Safety_Priority</principle>
<description>Prioritize user safety and privacy over match potential</description>
<principle>Transparency</principle>
<description>Be clear about what information is being shared and why</description>
</ethical_guidelines>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="matchmaker_agent.json",
)
# Initialize worker 1: Profile Analyzer
profile_analyzer = Agent(
agent_name="ProfileAnalyzer",
system_prompt="""
<agent_role>
You are the ProfileAnalyzer, responsible for deeply understanding user profiles and identifying meaningful compatibility factors while maintaining strict privacy protocols.
</agent_role>
<data_handling>
<sensitive_data>
<storage>
- All sensitive information must be encrypted
- Access logs must be maintained
- Data retention policies must be followed
</storage>
<processing>
- Use anonymized IDs for internal processing
- Apply privacy-preserving analysis techniques
- Implement data minimization principles
</processing>
</sensitive_data>
<analysis_parameters>
<compatibility_metrics>
- Shared interests alignment
- Relationship goal compatibility
- Value system overlap
- Lifestyle compatibility
- Communication style matching
</compatibility_metrics>
<red_flags>
- Inconsistent information
- Suspicious behavior patterns
- Policy violations
- Safety concerns
</red_flags>
</analysis_parameters>
</data_handling>
<output_guidelines>
<match_analysis>
- Generate compatibility scores
- Identify shared interests and potential conversation starters
- Flag potential concerns for review
- Provide reasoning for match recommendations
</match_analysis>
<privacy_filters>
- Apply progressive information disclosure rules
- Implement multi-stage verification for sensitive data sharing
- Maintain audit trails of information access
</privacy_filters>
</output_guidelines>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="profile_analyzer.json",
)
# Initialize worker 2: Connection Facilitator
connection_facilitator = Agent(
agent_name="ConnectionFacilitator",
system_prompt="""
<agent_role>
You are the ConnectionFacilitator, responsible for managing the interaction between matched users and ensuring smooth, safe, and meaningful communication.
</agent_role>
<communication_protocols>
<stages>
<stage name="initial_contact">
- Manage introduction messages
- Monitor response patterns
- Flag any concerning behavior
</stage>
<stage name="ongoing_interaction">
- Track engagement levels
- Identify conversation quality indicators
- Provide conversation suggestions when appropriate
</stage>
<stage name="milestone_tracking">
- Monitor relationship progression
- Record user feedback
- Update matching algorithms based on successful connections
</stage>
</stages>
<safety_measures>
<content_filtering>
- Screen for inappropriate content
- Block prohibited information sharing
- Monitor for harassment or abuse
</content_filtering>
<privacy_protection>
- Implement progressive contact information sharing
- Maintain anonymized communication channels
- Protect user identity until mutual consent
</privacy_protection>
</safety_measures>
</communication_protocols>
<feedback_system>
<metrics>
- User engagement rates
- Communication quality scores
- Safety incident reports
- User satisfaction ratings
</metrics>
<improvement_loop>
- Collect interaction data
- Analyze success patterns
- Implement refinements to matching criteria
- Update safety protocols as needed
</improvement_loop>
</feedback_system>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="connection_facilitator.json",
)
# Swarm-Level Prompt (Collaboration Prompt)
swarm_prompt = """
As a dating platform swarm, your collective goal is to facilitate meaningful connections while maintaining
the highest standards of privacy and safety. The MatchmakerAgent oversees the entire matching process,
coordinating between the ProfileAnalyzer who deeply understands user compatibility, and the ConnectionFacilitator
who manages the development of connections. Together, you must ensure that:
1. User privacy is maintained at all times
2. Information is shared progressively and with consent
3. Safety protocols are strictly followed
4. Meaningful connections are prioritized over quantity
5. User experience remains positive and engaging
"""
# Create a list of agents
agents = [matchmaker_agent, profile_analyzer, connection_facilitator]
# Define the flow pattern for the swarm
flow = "MatchmakerAgent -> ProfileAnalyzer -> ConnectionFacilitator"
# Using AgentRearrange class to manage the swarm
agent_system = AgentRearrange(
name="dating-swarm",
description="Privacy-focused dating platform agent system",
agents=agents,
flow=flow,
return_json=False,
output_type="final",
max_loops=1,
)
# Example task for the swarm
task = f"""
{swarm_prompt}
Process a new batch of user profiles and identify potential matches while ensuring all privacy protocols
are followed. For each potential match, provide compatibility reasoning and suggested conversation
starters without revealing any restricted information.
"""
# Run the swarm system with the task
output = agent_system.run(task)
print(output)

@ -136,6 +136,7 @@ class AgentValidator:
                str(e), str(e.__class__.__name__), str(config)
            )


class AgentLoader:
    """Class to manage agents through CSV with type safety"""
@ -202,7 +203,9 @@ class AgentLoader:
        elif file_type == "json":
            return self._load_agents_from_json()
        else:
            raise ValueError(
                "Unsupported file type. Use 'csv' or 'json'."
            )

    def _load_agents_from_csv(self) -> List[Agent]:
        """Load agents from a CSV file"""
@ -229,13 +232,13 @@ class AgentLoader:
"""Load agents from a JSON file""" """Load agents from a JSON file"""
import json import json
if not self.csv_path.with_suffix('.json').exists(): if not self.csv_path.with_suffix(".json").exists():
raise FileNotFoundError( raise FileNotFoundError(
f"JSON file not found at {self.csv_path.with_suffix('.json')}" f"JSON file not found at {self.csv_path.with_suffix('.json')}"
) )
agents: List[Agent] = [] agents: List[Agent] = []
with open(self.csv_path.with_suffix('.json'), "r") as f: with open(self.csv_path.with_suffix(".json"), "r") as f:
agents_data = json.load(f) agents_data = json.load(f)
for agent in agents_data: for agent in agents_data:
try: try:
@ -250,10 +253,14 @@ class AgentLoader:
                    )
                    continue

        print(
            f"Loaded {len(agents)} agents from {self.csv_path.with_suffix('.json')}"
        )
        return agents

    def _create_agent(
        self, validated_config: AgentConfigDict
    ) -> Agent:
        """Create an Agent instance from validated configuration"""
        return Agent(
            agent_name=validated_config["agent_name"],
@ -263,7 +270,9 @@ class AgentLoader:
            autosave=validated_config["autosave"],
            dashboard=validated_config["dashboard"],
            verbose=validated_config["verbose"],
            dynamic_temperature_enabled=validated_config[
                "dynamic_temperature"
            ],
            saved_state_path=validated_config["saved_state_path"],
            user_name=validated_config["user_name"],
            retry_attempts=validated_config["retry_attempts"],

@ -54,7 +54,3 @@ def bootup():
        except Exception as e:
            logger.error(f"Error during bootup: {str(e)}")
            raise
# Run bootup
bootup()
