parent
5ee98b6b3a
commit
8e374b15be
@ -0,0 +1,68 @@
import os

from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.agents_available import showcase_available_agents

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the Claims Director agent
director_agent = Agent(
    agent_name="ClaimsDirector",
    agent_description="Oversees and coordinates the medical insurance claims processing workflow",
    system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
    Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
    and accurately while maintaining compliance with insurance policies and regulations.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="director_agent.json",
)

# Initialize the Claims Processor agent
processor_agent = Agent(
    agent_name="ClaimsProcessor",
    agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
    system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
    coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="processor_agent.json",
)

# Initialize the Claims Auditor agent
auditor_agent = Agent(
    agent_name="ClaimsAuditor",
    agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
    system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
    identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="auditor_agent.json",
)

# Create a list of agents
agents = [director_agent, processor_agent, auditor_agent]

# Print a formatted showcase of the available agents
print(showcase_available_agents(agents=agents))
@ -0,0 +1,99 @@
import os

from swarm_models import OpenAIChat

from swarms import Agent, run_agents_with_tasks_concurrently

# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize agents for different roles
delaware_ccorp_agent = Agent(
    agent_name="Delaware-CCorp-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for a Delaware C Corporation,
    including all relevant laws and regulations, such as the Delaware General
    Corporation Law (DGCL) and other applicable Delaware corporate statutes.
    Ensure the description covers the requirements for hiring employees,
    contractors, and officers, including the necessary paperwork, tax
    obligations, and benefits. Also, outline the procedures for compliance
    with Delaware's employment laws, including anti-discrimination laws,
    workers' compensation, and unemployment insurance. Provide guidance on
    how to navigate the complexities of Delaware's corporate law and ensure
    that all hiring practices comply with state and federal regulations.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="delaware_ccorp_hiring_description.md",
    artifacts_file_extension=".md",
)

indian_foreign_agent = Agent(
    agent_name="Indian-Foreign-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for hiring in India or another
    foreign jurisdiction, including all relevant laws and regulations, such as
    the Indian Contract Act, Indian labour laws, and the Foreign Exchange
    Management Act (FEMA). Ensure the description covers the requirements for
    hiring employees, contractors, and officers, including the necessary
    paperwork, tax obligations, and benefits. Also, outline the procedures for
    compliance with Indian and foreign employment laws, including
    anti-discrimination laws, workers' compensation, and unemployment
    insurance. Provide guidance on how to navigate the complexities of Indian
    and foreign corporate law and ensure that all hiring practices comply with
    the applicable national and local regulations. Consider the implications
    of hiring foreign nationals and the requirements for obtaining necessary
    visas and work permits.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="indian_foreign_hiring_description.md",
    artifacts_file_extension=".md",
)

# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
    """
    Create a comprehensive hiring description for an Agent Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in AI/ML frameworks,
    programming languages, and data structures. Outline the key
    responsibilities, including designing and developing AI agents,
    integrating with existing systems, and ensuring scalability and
    performance.
    """,
    """
    Generate a detailed job description for a Prompt Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in natural language
    processing, machine learning, and software development. Outline the key
    responsibilities, including designing and optimizing prompts for AI
    systems, ensuring prompt quality and consistency, and collaborating with
    cross-functional teams.
    """,
]

# Run agents with their tasks concurrently
results = run_agents_with_tasks_concurrently(
    agents,
    tasks,
    all_cores=True,
    device="cpu",
)

# Print the results
for result in results:
    print(result)
@ -0,0 +1,59 @@
# Swarms 6.0.0 - Performance & Reliability Update 🚀

We're excited to announce the release of Swarms 6.0.0, bringing significant improvements to performance, reliability, and developer experience. This release focuses on streamlining core functionality while enhancing the overall stability of the framework.

## 📦 Installation

```bash
pip3 install -U swarms
```

## 🌟 Highlights

### Agent Enhancements
- **Improved RAG Performance**: Significant improvements to Retrieval-Augmented Generation capabilities (see the sketch after this list)
- **Enhanced Prompt Generation**: The auto-generated prompt now incorporates the agent's name, description, and system prompt for more contextual interactions
- **Streamlined Architecture**: Cleaned up unused code for better performance and maintainability
- **Simplified State Management**: Consolidated state management methods into a single `load()` function
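
A minimal sketch of the RAG-style setup these improvements target, mirroring the ChromaDB-backed finance-agent example elsewhere in this changeset; the agent name, model, paths, and task below are illustrative:

```python
import os

from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB

# Vector store used as the agent's long-term memory (RAG backend)
memory = ChromaDB(
    metric="cosine",
    output_dir="rag_demo",
    # docs_folder="artifacts",  # optional folder of documents to index
)

model = OpenAIChat(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

# Attaching the memory enables retrieval-augmented responses
agent = Agent(
    agent_name="RAG-Demo-Agent",
    llm=model,
    max_loops=1,
    long_term_memory=memory,
)

agent.run("Summarize the key points stored in memory about our claims workflow.")
```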

### Tools & Execution
- **Optimized Environment Management**: Fixed an issue where tool environments were instantiated multiple times
  - Environments now initialize once during `__init__`
- **New SwarmRouter Function**: Simplified routing mechanism (see the sketch below)
  - Returns consolidated string output from all agents
  - Improved coordination between swarm components
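
A rough sketch of how the router might be wired up, based on the import path and the sequential-workflow example later in this changeset; the `swarm_type` value and the exact constructor signature are assumptions, not confirmed API:

```python
from swarms.structs.swarm_router import SwarmRouter

# `analyst` and `advisor` are previously constructed Agent instances
router = SwarmRouter(
    name="document-analysis-swarm",
    description="Route a task through a small team of analysis agents",
    agents=[analyst, advisor],
    swarm_type="SequentialWorkflow",  # assumed option name
    max_loops=1,
)

# Per the release note, the router returns one consolidated string from all agents
output = router.run("Summarize the attached due-diligence documents.")
print(output)
```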

## 💪 Performance Improvements
- Faster execution times
- Reduced memory footprint
- More reliable logging system
- Lightweight and efficient codebase

## 🤝 Join Our Community

### We're Hiring!
Join our growing team! We're currently looking for:
- Agent Engineers
- Developer Relations
- Infrastructure Engineers
- And more!

### Get Involved
- ⭐ Star our repository
- 🔄 Fork the project
- 🛠 Submit pull requests
- 🐛 Report issues
- 💡 Share your ideas

### Contact & Support
- 📧 Email: kye@swarms.world
- 🔗 Issues: [GitHub Issues](https://github.com/kyegomez/swarms/issues)

## 🔜 What's Next?
Have ideas for features, bug fixes, or improvements? We'd love to hear from you! Reach out through our GitHub issues or email us directly.

---

*Thank you to all our contributors and users who make Swarms better every day. Together, we're building the future of swarm intelligence.*

#SwarmAI #OpenSource #AI #MachineLearning
@ -1,238 +1,231 @@
|
||||
# GroupChat
|
||||
# GroupChat Class Documentation
|
||||
|
||||
The `GroupChat` class is designed to manage a group chat session involving multiple agents. This class handles initializing the conversation, selecting the next speaker, resetting the chat, and executing the chat rounds, providing a structured approach to managing a dynamic and interactive conversation.
|
||||
|
||||
### Key Concepts
|
||||
The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.
|
||||
|
||||
## Installation
|
||||
```bash
|
||||
pip install swarms python-dotenv pydantic
|
||||
```
|
||||
|
||||
- **Agents**: Entities participating in the group chat.
|
||||
- **Conversation Management**: Handling the flow of conversation, selecting speakers, and maintaining chat history.
|
||||
- **Round-based Execution**: Managing the chat in predefined rounds.
|
||||
|
||||
## Attributes
|
||||
|
||||
### Arguments
|
||||
|
||||
| Argument | Type | Default | Description |
|
||||
|---------------------|----------------------|-------------|-------------|
|
||||
| `agents` | `List[Agent]` | `None` | List of agents participating in the group chat. |
|
||||
| `max_rounds` | `int` | `10` | Maximum number of chat rounds. |
|
||||
| `admin_name` | `str` | `"Admin"` | Name of the admin user. |
|
||||
| `group_objective` | `str` | `None` | Objective of the group chat. |
|
||||
| `selector_agent` | `Agent` | `None` | Agent responsible for selecting the next speaker. |
|
||||
| `rules` | `str` | `None` | Rules for the group chat. |
|
||||
| `*args` | | | Variable length argument list. |
|
||||
| `**kwargs` | | | Arbitrary keyword arguments. |
|
||||
|
||||
### Attributes
|
||||
|
||||
| Attribute | Type | Description |
|
||||
|---------------------|----------------------|-------------|
|
||||
| `agents` | `List[Agent]` | List of agents participating in the group chat. |
|
||||
| `max_rounds` | `int` | Maximum number of chat rounds. |
|
||||
| `admin_name` | `str` | Name of the admin user. |
|
||||
| `group_objective` | `str` | Objective of the group chat. |
|
||||
| `selector_agent` | `Agent` | Agent responsible for selecting the next speaker. |
|
||||
| `messages` | `Conversation` | Conversation object for storing the chat messages. |
|
||||
| Attribute | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| state_path | str | Path for saving/loading chat state |
|
||||
| wrapped_agents | List[AgentWrapper] | List of wrapped agent instances |
|
||||
| selector_agent | AgentWrapper | Agent responsible for speaker selection |
|
||||
| state | GroupChatState | Current state of the group chat |
|
||||
|
||||
## Methods
|
||||
|
||||
### __init__
|
||||
|
||||
Initializes the group chat with the given parameters.
|
||||
|
||||
**Examples:**
|
||||
### Core Methods
|
||||
|
||||
```python
|
||||
agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
|
||||
group_chat = GroupChat(agents=agents, max_rounds=5, admin_name="GroupAdmin")
|
||||
```
|
||||
|
||||
### agent_names
|
||||
|
||||
Returns the names of the agents in the group chat.
|
||||
def run(self, task: str) -> str:
|
||||
"""Execute the group chat conversation"""
|
||||
|
||||
**Returns:**
|
||||
def save_state(self) -> None:
|
||||
"""Save current state to disk"""
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `List[str]` | List of agent names. |
|
||||
@classmethod
|
||||
def load_state(cls, state_path: str) -> 'GroupChat':
|
||||
"""Load GroupChat from saved state"""
|
||||
|
||||
**Examples:**
|
||||
def get_conversation_summary(self) -> Dict[str, Any]:
|
||||
"""Return a summary of the conversation"""
|
||||
|
||||
```python
|
||||
names = group_chat.agent_names
|
||||
print(names) # Output: ['Agent 1', 'Agent 2']
|
||||
def export_conversation(self, format: str = "json") -> Union[str, Dict]:
|
||||
"""Export the conversation in specified format"""
|
||||
```
|
||||
|
||||
### reset
|
||||
|
||||
Resets the group chat by clearing the message history.
|
||||
|
||||
**Examples:**
|
||||
### Internal Methods
|
||||
|
||||
```python
|
||||
group_chat.reset()
|
||||
```
|
||||
|
||||
### agent_by_name
|
||||
|
||||
Finds an agent whose name is contained within the given name string.
|
||||
|
||||
**Arguments:**
|
||||
def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
|
||||
"""Log a single interaction"""
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|--------|-------------|
|
||||
| `name` | `str` | Name string to search for. |
|
||||
def _add_message(self, role: str, content: str) -> None:
|
||||
"""Add a message to the conversation history"""
|
||||
|
||||
**Returns:**
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `Agent` | Agent object with a name contained in the given name string. |
|
||||
|
||||
**Raises:**
|
||||
|
||||
- `ValueError`: If no agent is found with a name contained in the given name string.
|
||||
def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
|
||||
"""Select the next speaker using the selector agent"""
|
||||
```
|
||||
|
||||
**Examples:**
|
||||
## Usage Examples
|
||||
|
||||
### 1. Basic Setup with Two Agents
|
||||
```python
|
||||
agent = group_chat.agent_by_name("Agent 1")
|
||||
print(agent.agent_name) # Output: 'Agent 1'
|
||||
import os
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
|
||||
# Initialize OpenAI
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4o-mini")
|
||||
|
||||
# Create agents
|
||||
analyst = Agent(
|
||||
agent_name="Financial-Analyst",
|
||||
system_prompt="You are a financial analyst...",
|
||||
llm=model
|
||||
)
|
||||
|
||||
advisor = Agent(
|
||||
agent_name="Investment-Advisor",
|
||||
system_prompt="You are an investment advisor...",
|
||||
llm=model
|
||||
)
|
||||
|
||||
# Create group chat
|
||||
chat = GroupChat(
|
||||
name="Investment Team",
|
||||
agents=[analyst, advisor],
|
||||
max_rounds=5,
|
||||
group_objective="Provide investment advice"
|
||||
)
|
||||
|
||||
response = chat.run("What's the best investment strategy for retirement?")
|
||||
```
|
||||
|
||||
### next_agent
|
||||
|
||||
Returns the next agent in the list.
|
||||
|
||||
**Arguments:**
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|--------|-------------|
|
||||
| `agent` | `Agent`| Current agent. |
|
||||
|
||||
**Returns:**
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `Agent` | Next agent in the list. |
|
||||
|
||||
**Examples:**
|
||||
|
||||
### 2. Advanced Setup with State Management
|
||||
```python
|
||||
current_agent = group_chat.agents[0]
|
||||
next_agent = group_chat.next_agent(current_agent)
|
||||
print(next_agent.agent_name) # Output: Name of the next agent
|
||||
# Create group chat with state persistence
|
||||
chat = GroupChat(
|
||||
name="Investment Advisory Team",
|
||||
description="Expert team for financial planning",
|
||||
agents=[analyst, advisor, tax_specialist],
|
||||
max_rounds=10,
|
||||
admin_name="Senior Advisor",
|
||||
group_objective="Provide comprehensive financial planning",
|
||||
state_path="investment_chat_state.json",
|
||||
rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
|
||||
)
|
||||
|
||||
# Run chat and save state
|
||||
response = chat.run("Create a retirement plan for a 35-year old")
|
||||
chat.save_state()
|
||||
|
||||
# Load existing chat state
|
||||
loaded_chat = GroupChat.load_state("investment_chat_state.json")
|
||||
```
|
||||
|
||||
### select_speaker_msg
|
||||
|
||||
Returns the message for selecting the next speaker.
|
||||
|
||||
**Returns:**
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `str` | Prompt message for selecting the next speaker. |
|
||||
|
||||
**Examples:**
|
||||
|
||||
### 3. Using Custom Callable Agents
|
||||
```python
|
||||
message = group_chat.select_speaker_msg()
|
||||
print(message)
|
||||
def custom_agent(input_text: str) -> str:
|
||||
# Custom logic here
|
||||
return f"Processed: {input_text}"
|
||||
|
||||
# Mix of regular agents and callable functions
|
||||
chat = GroupChat(
|
||||
name="Hybrid Team",
|
||||
agents=[analyst, custom_agent],
|
||||
max_rounds=3
|
||||
)
|
||||
```
|
||||
|
||||
### select_speaker
|
||||
|
||||
Selects the next speaker.
|
||||
|
||||
**Arguments:**
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|----------------------|--------|-------------|
|
||||
| `last_speaker_agent` | `Agent`| Last speaker in the conversation. |
|
||||
| `selector_agent` | `Agent`| Agent responsible for selecting the next speaker. |
|
||||
|
||||
**Returns:**
|
||||
### 4. Export and Analysis
|
||||
```python
|
||||
# Run chat
|
||||
chat.run("Analyze market conditions")
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `Agent` | Next speaker. |
|
||||
# Get summary
|
||||
summary = chat.get_conversation_summary()
|
||||
print(summary)
|
||||
|
||||
**Examples:**
|
||||
# Export in different formats
|
||||
json_conv = chat.export_conversation(format="json")
|
||||
text_conv = chat.export_conversation(format="text")
|
||||
```
|
||||
|
||||
### 5. Advanced Configuration with Custom Selector
|
||||
```python
|
||||
next_speaker = group_chat.select_speaker(last_speaker_agent, selector_agent)
|
||||
print(next_speaker.agent_name)
|
||||
class CustomSelector(Agent):
|
||||
def run(self, input_text: str) -> str:
|
||||
# Custom selection logic
|
||||
return "Financial-Analyst"
|
||||
|
||||
chat = GroupChat(
|
||||
name="Custom Selection Team",
|
||||
agents=[analyst, advisor],
|
||||
selector_agent=CustomSelector(
|
||||
agent_name="Custom-Selector",
|
||||
system_prompt="Select the next speaker based on expertise",
|
||||
llm=model
|
||||
),
|
||||
max_rounds=5
|
||||
)
|
||||
```
|
||||
|
||||
### _participant_roles
|
||||
|
||||
Returns the roles of the participants.
|
||||
|
||||
**Returns:**
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `str` | Participant roles. |
|
||||
|
||||
**Examples:**
|
||||
|
||||
### 6. Debugging Setup
|
||||
```python
|
||||
roles = group_chat._participant_roles()
|
||||
print(roles)
|
||||
import logging
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
chat = GroupChat(
|
||||
name="Debug Team",
|
||||
agents=[analyst, advisor],
|
||||
max_rounds=3,
|
||||
state_path="debug_chat.json"
|
||||
)
|
||||
|
||||
# Run with detailed logging
|
||||
try:
|
||||
response = chat.run("Complex query")
|
||||
except Exception as e:
|
||||
logger.error(f"Chat failed: {str(e)}")
|
||||
# Access last successful state
|
||||
state = chat.state
|
||||
```
|
||||
|
||||
### __call__
|
||||
## Error Handling
|
||||
|
||||
Executes the group chat as a function.
|
||||
|
||||
**Arguments:**
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|--------|-------------|
|
||||
| `task` | `str` | Task to be performed. |
|
||||
|
||||
**Returns:**
|
||||
|
||||
| Return Type | Description |
|
||||
|-------------|-------------|
|
||||
| `str` | Reply from the last speaker. |
|
||||
|
||||
**Examples:**
|
||||
The GroupChat class includes comprehensive error handling:
|
||||
|
||||
```python
|
||||
response = group_chat(task="Discuss the project plan")
|
||||
print(response)
|
||||
try:
|
||||
chat = GroupChat(agents=[analyst]) # Will raise ValueError
|
||||
except ValueError as e:
|
||||
print("Configuration error:", str(e))
|
||||
|
||||
try:
|
||||
response = chat.run("Query")
|
||||
except Exception as e:
|
||||
# Access error state
|
||||
error_summary = chat.get_conversation_summary()
|
||||
print("Execution error:", str(e))
|
||||
print("State at error:", error_summary)
|
||||
```
|
||||
|
||||
### Additional Examples
|
||||
## Best Practices
|
||||
|
||||
#### Example 1: Initializing and Running a Group Chat
|
||||
1. **State Management**:
|
||||
- Always specify a `state_path` for important conversations
|
||||
- Use `save_state()` after critical operations
|
||||
- Implement regular state backups for long conversations
|
||||
|
||||
```python
|
||||
agents = [Agent(name="Agent 1"), Agent(name="Agent 2"), Agent(name="Agent 3")]
|
||||
selector_agent = Agent(name="Selector")
|
||||
group_chat = GroupChat(agents=agents, selector_agent=selector_agent, max_rounds=3, group_objective="Discuss the quarterly goals.")
|
||||
2. **Agent Configuration**:
|
||||
- Provide clear system prompts for each agent
|
||||
- Use descriptive agent names
|
||||
- Consider agent expertise when setting the group objective
|
||||
|
||||
response = group_chat(task="Let's start the discussion on quarterly goals.")
|
||||
print(response)
|
||||
```
|
||||
3. **Performance**:
|
||||
- Keep `max_rounds` reasonable (5-10 for most cases)
|
||||
- Use early stopping conditions when possible
|
||||
- Monitor conversation length and complexity
|
||||
|
||||
#### Example 2: Resetting the Group Chat
|
||||
4. **Error Handling**:
|
||||
- Always wrap chat execution in try-except blocks
|
||||
- Implement proper logging
|
||||
- Save states before potentially risky operations
|
||||
|
||||
```python
|
||||
group_chat.reset()
|
||||
```
|
||||
|
||||
#### Example 3: Selecting the Next Speaker
|
||||
|
||||
```python
|
||||
last_speaker = group_chat.agents[0]
|
||||
next_speaker = group_chat.select_speaker(last_speaker_agent=last_speaker, selector_agent=selector_agent)
|
||||
print(next_speaker.agent_name)
|
||||
```
|
||||
## Limitations
|
||||
|
||||
## Summary
|
||||
- Agents must either have a `run` method or be callable
|
||||
- State files can grow large with many interactions
|
||||
- Selector agent may need optimization for large agent groups
|
||||
- Real-time streaming not supported in basic configuration
|
||||
|
||||
The `GroupChat` class offers a structured approach to managing a group chat involving multiple agents. With functionalities for initializing conversations, selecting speakers, and handling chat rounds, it provides a robust framework for dynamic and interactive discussions. This makes it an essential tool for applications requiring coordinated communication among multiple agents.
|
@ -0,0 +1,113 @@
|
||||
import os
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Custom system prompt for VC legal document generation
|
||||
VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
|
||||
Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:
|
||||
|
||||
1. Always include standard legal disclaimers
|
||||
2. Follow standard VC document structures
|
||||
3. Flag areas that need attorney review
|
||||
4. Request necessary information for document completion
|
||||
5. Maintain consistency across related documents
|
||||
6. Output <DONE> only when document is complete and verified
|
||||
|
||||
Remember: All output should be marked as 'DRAFT' and require professional legal review."""
|
||||
|
||||
|
||||
def create_vc_legal_agent():
|
||||
load_dotenv()
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
# Configure the model with appropriate parameters for legal work
|
||||
# Get the Groq API key from the environment variable
|
||||
api_key = os.getenv("GROQ_API_KEY")
|
||||
|
||||
# Model
|
||||
model = OpenAIChat(
|
||||
openai_api_base="https://api.groq.com/openai/v1",
|
||||
openai_api_key=api_key,
|
||||
model_name="llama-3.1-70b-versatile",
|
||||
temperature=0.1,
|
||||
)
|
||||
|
||||
# Initialize the persistent agent
|
||||
agent = Agent(
|
||||
agent_name="VC-Legal-Document-Agent",
|
||||
system_prompt=VC_LEGAL_AGENT_PROMPT,
|
||||
llm=model,
|
||||
max_loops="auto", # Allows multiple iterations until completion
|
||||
stopping_token="<DONE>", # Agent will continue until this token is output
|
||||
autosave=True,
|
||||
dashboard=True, # Enable dashboard for monitoring
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=False, # Disable for consistency in legal documents
|
||||
saved_state_path="vc_legal_agent_state.json",
|
||||
user_name="legal_corp",
|
||||
retry_attempts=3,
|
||||
context_length=200000,
|
||||
return_step_meta=True,
|
||||
output_type="string",
|
||||
streaming_on=False,
|
||||
)
|
||||
|
||||
return agent
|
||||
|
||||
|
||||
def generate_legal_document(agent, document_type, parameters):
|
||||
"""
|
||||
Generate a legal document with multiple refinement iterations
|
||||
|
||||
Args:
|
||||
agent: The initialized VC legal agent
|
||||
document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
|
||||
parameters: Dict containing necessary parameters for the document
|
||||
|
||||
Returns:
|
||||
str: The generated document content
|
||||
"""
|
||||
prompt = f"""
|
||||
Generate a {document_type} with the following parameters:
|
||||
{parameters}
|
||||
|
||||
Please follow these steps:
|
||||
1. Create initial draft
|
||||
2. Review for completeness
|
||||
3. Add necessary legal disclaimers
|
||||
4. Verify all required sections
|
||||
5. Output <DONE> when complete
|
||||
|
||||
Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
|
||||
"""
|
||||
|
||||
return agent.run(prompt)
|
||||
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
# Initialize the agent
|
||||
legal_agent = create_vc_legal_agent()
|
||||
|
||||
# Example parameters for a term sheet
|
||||
parameters = {
|
||||
"company_name": "TechStartup Inc.",
|
||||
"investment_amount": "$5,000,000",
|
||||
"valuation": "$20,000,000",
|
||||
"investor_rights": [
|
||||
"Board seat",
|
||||
"Pro-rata rights",
|
||||
"Information rights",
|
||||
],
|
||||
"type_of_security": "Series A Preferred Stock",
|
||||
}
|
||||
|
||||
# Generate a term sheet
|
||||
document = generate_legal_document(
|
||||
legal_agent, "term_sheet", parameters
|
||||
)
|
||||
|
||||
# Save the generated document
|
||||
with open("generated_term_sheet_draft.md", "w") as f:
|
||||
f.write(document)
|
@ -1,44 +0,0 @@
import os

from swarms_memory import ChromaDB

from swarms import Agent
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Initialize the ChromaDB client
chromadb = ChromaDB(
    metric="cosine",
    output_dir="fiance_agent_rag",
    # docs_folder="artifacts", # Folder of your documents
)

# Model
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    agent_description="Agent creates ",
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=200000,
    long_term_memory=chromadb,
)

agent.run(
    "What are the components of a startup's stock incentive equity plan?"
)
@ -1,117 +0,0 @@
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from swarms_memory import ChromaDB
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
# Making an instance of the ChromaDB class
|
||||
memory = ChromaDB(
|
||||
metric="cosine",
|
||||
n_results=3,
|
||||
output_dir="results",
|
||||
docs_folder="docs",
|
||||
)
|
||||
|
||||
# Model
|
||||
model = OpenAIChat(
|
||||
api_key=os.getenv("OPENAI_API_KEY"),
|
||||
model_name="gpt-4o-mini",
|
||||
temperature=0.1,
|
||||
)
|
||||
|
||||
|
||||
# Tools in swarms are simple python functions and docstrings
|
||||
def terminal(
|
||||
code: str,
|
||||
):
|
||||
"""
|
||||
Run code in the terminal.
|
||||
|
||||
Args:
|
||||
code (str): The code to run in the terminal.
|
||||
|
||||
Returns:
|
||||
str: The output of the code.
|
||||
"""
|
||||
out = subprocess.run(
|
||||
code, shell=True, capture_output=True, text=True
|
||||
).stdout
|
||||
return str(out)
|
||||
|
||||
|
||||
def browser(query: str):
|
||||
"""
|
||||
Search the query in the browser with the `browser` tool.
|
||||
|
||||
Args:
|
||||
query (str): The query to search in the browser.
|
||||
|
||||
Returns:
|
||||
str: The search results.
|
||||
"""
|
||||
import webbrowser
|
||||
|
||||
url = f"https://www.google.com/search?q={query}"
|
||||
webbrowser.open(url)
|
||||
return f"Searching for {query} in the browser."
|
||||
|
||||
|
||||
def create_file(file_path: str, content: str):
|
||||
"""
|
||||
Create a file using the file editor tool.
|
||||
|
||||
Args:
|
||||
file_path (str): The path to the file.
|
||||
content (str): The content to write to the file.
|
||||
|
||||
Returns:
|
||||
str: The result of the file creation operation.
|
||||
"""
|
||||
with open(file_path, "w") as file:
|
||||
file.write(content)
|
||||
return f"File {file_path} created successfully."
|
||||
|
||||
|
||||
def file_editor(file_path: str, mode: str, content: str):
|
||||
"""
|
||||
Edit a file using the file editor tool.
|
||||
|
||||
Args:
|
||||
file_path (str): The path to the file.
|
||||
mode (str): The mode to open the file in.
|
||||
content (str): The content to write to the file.
|
||||
|
||||
Returns:
|
||||
str: The result of the file editing operation.
|
||||
"""
|
||||
with open(file_path, mode) as file:
|
||||
file.write(content)
|
||||
return f"File {file_path} edited successfully."
|
||||
|
||||
|
||||
# Agent
|
||||
agent = Agent(
|
||||
agent_name="Devin",
|
||||
system_prompt=(
|
||||
"Autonomous agent that can interact with humans and other"
|
||||
" agents. Be Helpful and Kind. Use the tools provided to"
|
||||
" assist the user. Return all code in markdown format."
|
||||
),
|
||||
llm=model,
|
||||
max_loops="auto",
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
streaming_on=True,
|
||||
verbose=True,
|
||||
stopping_token="<DONE>",
|
||||
interactive=True,
|
||||
tools=[terminal, browser, file_editor, create_file],
|
||||
streaming=True,
|
||||
long_term_memory=memory,
|
||||
)
|
||||
|
||||
# Run the agent
|
||||
out = agent(
|
||||
"Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
|
||||
)
|
||||
print(out)
|
@ -0,0 +1,319 @@
|
||||
"""
|
||||
Zoe - Real Estate Agent
|
||||
|
||||
"""
|
||||
|
||||
from typing import Optional, Dict, Any, List
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
from loguru import logger
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from dotenv import load_dotenv
|
||||
from enum import Enum
|
||||
|
||||
# Configure loguru logger
|
||||
logger.add(
|
||||
"logs/real_estate_agent_{time}.log",
|
||||
rotation="500 MB",
|
||||
retention="10 days",
|
||||
level="INFO",
|
||||
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
|
||||
)
|
||||
|
||||
|
||||
class PropertyType(str, Enum):
|
||||
"""Enum for property types"""
|
||||
|
||||
OFFICE = "office"
|
||||
RETAIL = "retail"
|
||||
INDUSTRIAL = "industrial"
|
||||
MIXED_USE = "mixed-use"
|
||||
LAND = "land"
|
||||
|
||||
|
||||
@dataclass
|
||||
class PropertyListing:
|
||||
"""Data class for commercial property listings"""
|
||||
|
||||
property_id: str
|
||||
address: str
|
||||
city: str
|
||||
state: str
|
||||
zip_code: str
|
||||
price: float
|
||||
square_footage: float
|
||||
property_type: PropertyType
|
||||
zoning: str
|
||||
listing_date: datetime
|
||||
lat: float
|
||||
lng: float
|
||||
description: Optional[str] = None
|
||||
features: Optional[List[str]] = None
|
||||
images: Optional[List[str]] = None
|
||||
|
||||
|
||||
class PropertyRadarAPI:
|
||||
"""Client for PropertyRadar API integration"""
|
||||
|
||||
def __init__(self, api_key: str):
|
||||
"""Initialize PropertyRadar API client
|
||||
|
||||
Args:
|
||||
api_key (str): PropertyRadar API key
|
||||
"""
|
||||
self.api_key = api_key
|
||||
self.base_url = "https://api.propertyradar.com/v1"
|
||||
self.session = requests.Session()
|
||||
self.session.headers.update(
|
||||
{
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
)
|
||||
|
||||
def search_properties(
|
||||
self,
|
||||
max_price: float = 10_000_000,
|
||||
property_types: List[PropertyType] = None,
|
||||
location: Dict[str, Any] = None,
|
||||
min_sqft: Optional[float] = None,
|
||||
max_sqft: Optional[float] = None,
|
||||
page: int = 1,
|
||||
limit: int = 20,
|
||||
) -> List[PropertyListing]:
|
||||
"""
|
||||
Search for commercial properties using PropertyRadar API
|
||||
|
||||
Args:
|
||||
max_price (float): Maximum property price
|
||||
property_types (List[PropertyType]): Types of properties to search for
|
||||
location (Dict[str, Any]): Location criteria (city, county, or coordinates)
|
||||
min_sqft (Optional[float]): Minimum square footage
|
||||
max_sqft (Optional[float]): Maximum square footage
|
||||
page (int): Page number for pagination
|
||||
limit (int): Number of results per page
|
||||
|
||||
Returns:
|
||||
List[PropertyListing]: List of matching properties
|
||||
"""
|
||||
try:
|
||||
# Build the query parameters
|
||||
params = {
|
||||
"price_max": max_price,
|
||||
"property_types": (
|
||||
[pt.value for pt in property_types]
|
||||
if property_types
|
||||
else None
|
||||
),
|
||||
"page": page,
|
||||
"limit": limit,
|
||||
"for_sale": True,
|
||||
"state": "FL", # Florida only
|
||||
"commercial_property": True,
|
||||
}
|
||||
|
||||
# Add location parameters
|
||||
if location:
|
||||
params.update(location)
|
||||
|
||||
# Add square footage filters
|
||||
if min_sqft:
|
||||
params["square_feet_min"] = min_sqft
|
||||
if max_sqft:
|
||||
params["square_feet_max"] = max_sqft
|
||||
|
||||
# Make the API request
|
||||
response = self.session.get(
|
||||
f"{self.base_url}/properties",
|
||||
params={
|
||||
k: v for k, v in params.items() if v is not None
|
||||
},
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
# Parse the response
|
||||
properties_data = response.json()
|
||||
|
||||
# Convert to PropertyListing objects
|
||||
return [
|
||||
PropertyListing(
|
||||
property_id=prop["id"],
|
||||
address=prop["address"],
|
||||
city=prop["city"],
|
||||
state=prop["state"],
|
||||
zip_code=prop["zip_code"],
|
||||
price=float(prop["price"]),
|
||||
square_footage=float(prop["square_feet"]),
|
||||
property_type=PropertyType(prop["property_type"]),
|
||||
zoning=prop["zoning"],
|
||||
listing_date=datetime.fromisoformat(
|
||||
prop["list_date"]
|
||||
),
|
||||
lat=float(prop["latitude"]),
|
||||
lng=float(prop["longitude"]),
|
||||
description=prop.get("description"),
|
||||
features=prop.get("features", []),
|
||||
images=prop.get("images", []),
|
||||
)
|
||||
for prop in properties_data["results"]
|
||||
]
|
||||
|
||||
except requests.RequestException as e:
|
||||
logger.error(f"Error fetching properties: {str(e)}")
|
||||
raise
|
||||
|
||||
|
||||
class CommercialRealEstateAgent:
|
||||
"""Agent for searching and analyzing commercial real estate properties"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
openai_api_key: str,
|
||||
propertyradar_api_key: str,
|
||||
model_name: str = "gpt-4",
|
||||
temperature: float = 0.1,
|
||||
saved_state_path: Optional[str] = None,
|
||||
):
|
||||
"""Initialize the real estate agent
|
||||
|
||||
Args:
|
||||
openai_api_key (str): OpenAI API key
|
||||
propertyradar_api_key (str): PropertyRadar API key
|
||||
model_name (str): Name of the LLM model to use
|
||||
temperature (float): Temperature setting for the LLM
|
||||
saved_state_path (Optional[str]): Path to save agent state
|
||||
"""
|
||||
self.property_api = PropertyRadarAPI(propertyradar_api_key)
|
||||
|
||||
# Initialize OpenAI model
|
||||
self.model = OpenAIChat(
|
||||
openai_api_key=openai_api_key,
|
||||
model_name=model_name,
|
||||
temperature=temperature,
|
||||
)
|
||||
|
||||
# Initialize the agent
|
||||
self.agent = Agent(
|
||||
agent_name="Commercial-Real-Estate-Agent",
|
||||
system_prompt=self._get_system_prompt(),
|
||||
llm=self.model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
verbose=True,
|
||||
saved_state_path=saved_state_path,
|
||||
context_length=200000,
|
||||
streaming_on=False,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Commercial Real Estate Agent initialized successfully"
|
||||
)
|
||||
|
||||
def _get_system_prompt(self) -> str:
|
||||
"""Get the system prompt for the agent"""
|
||||
return """You are a specialized commercial real estate agent assistant focused on Central Florida properties.
|
||||
Your primary responsibilities are:
|
||||
1. Search for commercial properties under $10 million
|
||||
2. Focus on properties zoned for commercial use
|
||||
3. Provide detailed analysis of property features, location benefits, and potential ROI
|
||||
4. Consider local market conditions and growth potential
|
||||
5. Verify zoning compliance and restrictions
|
||||
|
||||
When analyzing properties, consider:
|
||||
- Current market valuations
|
||||
- Local business development plans
|
||||
- Traffic patterns and accessibility
|
||||
- Nearby amenities and businesses
|
||||
- Future development potential"""
|
||||
|
||||
def search_properties(
|
||||
self,
|
||||
max_price: float = 10_000_000,
|
||||
property_types: List[PropertyType] = None,
|
||||
location: Dict[str, Any] = None,
|
||||
min_sqft: Optional[float] = None,
|
||||
max_sqft: Optional[float] = None,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Search for properties and provide analysis
|
||||
|
||||
Args:
|
||||
max_price (float): Maximum property price
|
||||
property_types (List[PropertyType]): Types of properties to search
|
||||
location (Dict[str, Any]): Location criteria
|
||||
min_sqft (Optional[float]): Minimum square footage
|
||||
max_sqft (Optional[float]): Maximum square footage
|
||||
|
||||
Returns:
|
||||
List[Dict[str, Any]]: List of properties with analysis
|
||||
"""
|
||||
try:
|
||||
# Search for properties
|
||||
properties = self.property_api.search_properties(
|
||||
max_price=max_price,
|
||||
property_types=property_types,
|
||||
location=location,
|
||||
min_sqft=min_sqft,
|
||||
max_sqft=max_sqft,
|
||||
)
|
||||
|
||||
# Analyze each property
|
||||
analyzed_properties = []
|
||||
for prop in properties:
|
||||
analysis = self.agent.run(
|
||||
f"Analyze this commercial property:\n"
|
||||
f"Address: {prop.address}, {prop.city}, FL {prop.zip_code}\n"
|
||||
f"Price: ${prop.price:,.2f}\n"
|
||||
f"Square Footage: {prop.square_footage:,.0f}\n"
|
||||
f"Property Type: {prop.property_type.value}\n"
|
||||
f"Zoning: {prop.zoning}\n"
|
||||
f"Description: {prop.description or 'Not provided'}"
|
||||
)
|
||||
|
||||
analyzed_properties.append(
|
||||
{"property": prop.__dict__, "analysis": analysis}
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Successfully analyzed {len(analyzed_properties)} properties"
|
||||
)
|
||||
return analyzed_properties
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error in property search and analysis: {str(e)}"
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to demonstrate usage"""
|
||||
load_dotenv()
|
||||
|
||||
# Initialize the agent
|
||||
agent = CommercialRealEstateAgent(
|
||||
openai_api_key=os.getenv("OPENAI_API_KEY"),
|
||||
propertyradar_api_key=os.getenv("PROPERTYRADAR_API_KEY"),
|
||||
saved_state_path="real_estate_agent_state.json",
|
||||
)
|
||||
|
||||
# Example search
|
||||
results = agent.search_properties(
|
||||
max_price=5_000_000,
|
||||
property_types=[PropertyType.RETAIL, PropertyType.OFFICE],
|
||||
location={"city": "Orlando", "radius_miles": 25},
|
||||
min_sqft=2000,
|
||||
)
|
||||
|
||||
# Save results
|
||||
with open("search_results.json", "w") as f:
|
||||
json.dump(results, f, default=str, indent=2)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,52 @@
#!/bin/bash

# Set up logging
LOG_FILE="docs_compilation.log"
OUTPUT_FILE="combined_docs.txt"

# Initialize log file
echo "$(date): Starting documentation compilation" > "$LOG_FILE"

# Create/clear output file
> "$OUTPUT_FILE"

# Function to determine file type and handle accordingly
process_file() {
    local file="$1"

    # Get file extension
    extension="${file##*.}"

    echo "$(date): Processing $file" >> "$LOG_FILE"

    case "$extension" in
        md|markdown)
            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
            cat "$file" >> "$OUTPUT_FILE"
            echo -e "\n\n" >> "$OUTPUT_FILE"
            ;;
        txt)
            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
            cat "$file" >> "$OUTPUT_FILE"
            echo -e "\n\n" >> "$OUTPUT_FILE"
            ;;
        *)
            echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
            return
            ;;
    esac

    echo "$(date): Successfully processed $file" >> "$LOG_FILE"
}

# Find and process all documentation files
find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
    process_file "$file"
done

# Log completion
echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"

# Print summary
echo "Documentation compilation complete. Check $LOG_FILE for details."
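
A typical invocation, assuming the script is saved in a directory that sits next to `docs/`; the file name `compile_docs.sh` is illustrative:

```bash
# Run from the directory adjacent to ../docs
bash compile_docs.sh

# Inspect the results
tail docs_compilation.log
head combined_docs.txt
```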
@ -0,0 +1,117 @@
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
from swarms import Agent, SequentialWorkflow
|
||||
from swarm_models import OpenAIChat
|
||||
|
||||
load_dotenv()
|
||||
|
||||
# Get the Groq API key from the environment variable
|
||||
api_key = os.getenv("GROQ_API_KEY")
|
||||
|
||||
# Model
|
||||
model = OpenAIChat(
|
||||
openai_api_base="https://api.groq.com/openai/v1",
|
||||
openai_api_key=api_key,
|
||||
model_name="llama-3.1-70b-versatile",
|
||||
temperature=0.1,
|
||||
)
|
||||
|
||||
|
||||
# Initialize specialized agents
|
||||
data_extractor_agent = Agent(
|
||||
agent_name="Data-Extractor",
|
||||
system_prompt=None,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="data_extractor_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
summarizer_agent = Agent(
|
||||
agent_name="Document-Summarizer",
|
||||
system_prompt=None,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="summarizer_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
financial_analyst_agent = Agent(
|
||||
agent_name="Financial-Analyst",
|
||||
system_prompt=None,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="financial_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
market_analyst_agent = Agent(
|
||||
agent_name="Market-Analyst",
|
||||
system_prompt=None,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="market_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
operational_analyst_agent = Agent(
|
||||
agent_name="Operational-Analyst",
|
||||
system_prompt=None,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="operational_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
# Initialize the SequentialWorkflow
|
||||
router = SequentialWorkflow(
|
||||
name="pe-document-analysis-swarm",
|
||||
description="Analyze documents for private equity due diligence and investment decision-making",
|
||||
max_loops=1,
|
||||
agents=[
|
||||
data_extractor_agent,
|
||||
summarizer_agent,
|
||||
financial_analyst_agent,
|
||||
market_analyst_agent,
|
||||
operational_analyst_agent,
|
||||
],
|
||||
output_type="all",
|
||||
)
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
# Run a comprehensive private equity document analysis task
|
||||
result = router.run(
|
||||
"Where is the best place to find template term sheets for series A startups. Provide links and references"
|
||||
)
|
||||
print(result)
|
@ -1,120 +0,0 @@
|
||||
from swarms.utils.loguru_logger import logger
|
||||
import yaml
|
||||
from pydantic import BaseModel
|
||||
from typing import List, Optional
|
||||
import json
|
||||
from swarms.structs.agent_registry import AgentRegistry
|
||||
from swarms.structs.agent import Agent
|
||||
from swarm_models.popular_llms import OpenAIChat
|
||||
|
||||
|
||||
class AgentInput(BaseModel):
|
||||
agent_name: str = "Swarm Agent"
|
||||
system_prompt: Optional[str] = None
|
||||
agent_description: Optional[str] = None
|
||||
model_name: str = "OpenAIChat"
|
||||
max_loops: int = 1
|
||||
autosave: bool = False
|
||||
dynamic_temperature_enabled: bool = False
|
||||
dashboard: bool = False
|
||||
verbose: bool = False
|
||||
streaming_on: bool = True
|
||||
saved_state_path: Optional[str] = None
|
||||
sop: Optional[str] = None
|
||||
sop_list: Optional[List[str]] = None
|
||||
user_name: str = "User"
|
||||
retry_attempts: int = 3
|
||||
context_length: int = 8192
|
||||
task: Optional[str] = None
|
||||
interactive: bool = False
|
||||
|
||||
|
||||
def parse_yaml_to_json(yaml_str: str) -> str:
|
||||
"""
|
||||
Parses the given YAML string into an AgentInput model and converts it to a JSON string.
|
||||
|
||||
Args:
|
||||
yaml_str (str): The YAML string to be parsed.
|
||||
|
||||
Returns:
|
||||
str: The JSON string representation of the parsed YAML.
|
||||
|
||||
Raises:
|
||||
ValueError: If the YAML string cannot be parsed into the AgentInput model.
|
||||
"""
|
||||
try:
|
||||
data = yaml.safe_load(yaml_str)
|
||||
agent_input = AgentInput(**data)
|
||||
return agent_input.json()
|
||||
except yaml.YAMLError as e:
|
||||
print(f"YAML Error: {e}")
|
||||
raise ValueError("Invalid YAML input.") from e
|
||||
except ValueError as e:
|
||||
print(f"Validation Error: {e}")
|
||||
raise ValueError("Invalid data for AgentInput model.") from e
|
||||
|
||||
|
||||
# # Example usage
|
||||
# yaml_input = """
|
||||
# agent_name: "Custom Agent"
|
||||
# system_prompt: "System prompt example"
|
||||
# agent_description: "This is a test agent"
|
||||
# model_name: "CustomModel"
|
||||
# max_loops: 5
|
||||
# autosave: true
|
||||
# dynamic_temperature_enabled: true
|
||||
# dashboard: true
|
||||
# verbose: true
|
||||
# streaming_on: false
|
||||
# saved_state_path: "/path/to/state"
|
||||
# sop: "Standard operating procedure"
|
||||
# sop_list: ["step1", "step2"]
|
||||
# user_name: "Tester"
|
||||
# retry_attempts: 5
|
||||
# context_length: 4096
|
||||
# task: "Perform testing"
|
||||
# """
|
||||
|
||||
# json_output = parse_yaml_to_json(yaml_input)
|
||||
# print(json_output)
|
||||
|
||||
registry = AgentRegistry()
|
||||
|
||||
|
||||
def create_agent_from_yaml(yaml_path: str) -> None:
|
||||
with open(yaml_path, "r") as file:
|
||||
yaml_str = file.read()
|
||||
agent_json = parse_yaml_to_json(yaml_str)
|
||||
agent_config = json.loads(agent_json)
|
||||
|
||||
agent = Agent(
|
||||
agent_name=agent_config.get("agent_name", "Swarm Agent"),
|
||||
system_prompt=agent_config.get("system_prompt"),
|
||||
agent_description=agent_config.get("agent_description"),
|
||||
llm=OpenAIChat(),
|
||||
max_loops=agent_config.get("max_loops", 1),
|
||||
autosave=agent_config.get("autosave", False),
|
||||
dynamic_temperature_enabled=agent_config.get(
|
||||
"dynamic_temperature_enabled", False
|
||||
),
|
||||
dashboard=agent_config.get("dashboard", False),
|
||||
verbose=agent_config.get("verbose", False),
|
||||
streaming_on=agent_config.get("streaming_on", True),
|
||||
saved_state_path=agent_config.get("saved_state_path"),
|
||||
retry_attempts=agent_config.get("retry_attempts", 3),
|
||||
context_length=agent_config.get("context_length", 8192),
|
||||
)
|
||||
|
||||
registry.add(agent.agent_name, agent)
|
||||
logger.info(f"Agent {agent.agent_name} created from {yaml_path}.")
|
||||
|
||||
|
||||
def run_agent(agent_name: str, task: str) -> None:
|
||||
agent = registry.find_agent_by_name(agent_name)
|
||||
agent.run(task)
|
||||
|
||||
|
||||
def list_agents() -> None:
|
||||
agents = registry.list_agents()
|
||||
for agent_id in agents:
|
||||
print(agent_id)
|
@ -1,10 +0,0 @@
from typing import List

from pydantic import BaseModel

from swarms.schemas.agent_step_schemas import Step


class Plan(BaseModel):
    steps: List[Step]

    class Config:
        orm_mode = True
@ -0,0 +1,93 @@
|
||||
from typing import List, Any
|
||||
from loguru import logger
|
||||
from swarms.structs.agent import Agent
|
||||
|
||||
|
||||
def get_agent_name(agent: Any) -> str:
|
||||
"""Helper function to safely get agent name
|
||||
|
||||
Args:
|
||||
agent (Any): The agent object to get name from
|
||||
|
||||
Returns:
|
||||
str: The agent's name if found, 'Unknown' otherwise
|
||||
"""
|
||||
if isinstance(agent, Agent) and hasattr(agent, "agent_name"):
|
||||
return agent.agent_name
|
||||
return "Unknown"
|
||||
|
||||
|
||||
def get_agent_description(agent: Any) -> str:
|
||||
"""Helper function to get agent description or system prompt preview
|
||||
|
||||
Args:
|
||||
agent (Any): The agent object
|
||||
|
||||
Returns:
|
||||
str: Description or the first 150 characters of the system prompt
|
||||
"""
|
||||
if not isinstance(agent, Agent):
|
||||
return "N/A"
|
||||
|
||||
if hasattr(agent, "description") and agent.description:
|
||||
return agent.description
|
||||
|
||||
if hasattr(agent, "system_prompt") and agent.system_prompt:
|
||||
return f"{agent.system_prompt[:150]}..."
|
||||
|
||||
return "N/A"
|
||||
|
||||
|
||||
def showcase_available_agents(
|
||||
name: str = None,
|
||||
description: str = None,
|
||||
agents: List[Agent] = [],
|
||||
update_agents_on: bool = False,
|
||||
) -> str:
|
||||
"""
|
||||
Generate a formatted string showcasing all available agents and their descriptions.
|
||||
|
||||
Args:
|
||||
agents (List[Agent]): List of Agent objects to showcase.
|
||||
update_agents_on (bool, optional): If True, updates each agent's system prompt with
|
||||
the showcase information. Defaults to False.
|
||||
|
||||
Returns:
|
||||
str: Formatted string containing agent information, including names, descriptions
|
||||
and IDs for all available agents.
|
||||
"""
|
||||
logger.info(f"Showcasing {len(agents)} available agents")
|
||||
|
||||
formatted_agents = []
|
||||
header = f"\n####### Agents available in the swarm: {name} ############\n"
|
||||
header += f"{description}\n"
|
||||
row_format = "{:<5} | {:<20} | {:<50}"
|
||||
header_row = row_format.format("ID", "Agent Name", "Description")
|
||||
separator = "-" * 80
|
||||
|
||||
formatted_agents.append(header)
|
||||
formatted_agents.append(separator)
|
||||
formatted_agents.append(header_row)
|
||||
formatted_agents.append(separator)
|
||||
|
||||
for idx, agent in enumerate(agents):
|
||||
if not isinstance(agent, Agent):
|
||||
logger.warning(
|
||||
f"Skipping non-Agent object: {type(agent)}"
|
||||
)
|
||||
continue
|
||||
|
||||
agent_name = get_agent_name(agent)
|
||||
description = (
|
||||
get_agent_description(agent)[:100] + "..."
|
||||
if len(get_agent_description(agent)) > 100
|
||||
else get_agent_description(agent)
|
||||
)
|
||||
|
||||
formatted_agents.append(
|
||||
row_format.format(idx + 1, agent_name, description)
|
||||
)
|
||||
|
||||
showcase = "\n".join(formatted_agents)
|
||||
|
||||
return showcase
|
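
For reference, a minimal call to the helper above; the agent list is assumed to be built as in the claims-processing example earlier in this changeset:

```python
# Assumes director_agent, processor_agent, and auditor_agent already exist
print(
    showcase_available_agents(
        name="ClaimsTeam",
        description="Agents that process and audit medical insurance claims",
        agents=[director_agent, processor_agent, auditor_agent],
    )
)
```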
@ -1,3 +0,0 @@
"""
This class takes a swarm type as input and auto-generates a list of `Agent` structures with their names, descriptions, system prompts, and more.
"""
@ -0,0 +1,299 @@
from loguru import logger

import os
from typing import List

from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller, OpenAIChat

from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter


class AgentConfig(BaseModel):
    """Configuration for an individual agent in a swarm"""

    name: str = Field(
        description="The name of the agent", example="Research-Agent"
    )
    description: str = Field(
        description="A description of the agent's purpose and capabilities",
        example="Agent responsible for researching and gathering information",
    )
    system_prompt: str = Field(
        description="The system prompt that defines the agent's behavior",
        example="You are a research agent. Your role is to gather and analyze information...",
    )
    max_loops: int = Field(
        description="Maximum number of reasoning loops the agent can perform",
        example=3,
    )


class SwarmConfig(BaseModel):
    """Configuration for a swarm of cooperative agents"""

    name: str = Field(
        description="The name of the swarm",
        example="Research-Writing-Swarm",
    )
    description: str = Field(
        description="The description of the swarm's purpose and capabilities",
        example="A swarm of agents that work together to research topics and write articles",
    )
    agents: List[AgentConfig] = Field(
        description="The list of agents that make up the swarm",
        example=[
            AgentConfig(
                name="Research-Agent",
                description="Gathers information",
                system_prompt="You are a research agent...",
                max_loops=2,
            ),
            AgentConfig(
                name="Writing-Agent",
                description="Writes content",
                system_prompt="You are a writing agent...",
                max_loops=1,
            ),
        ],
    )
    max_loops: int = Field(
        description="The maximum number of loops to run the swarm",
        example=1,
    )


# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)


BOSS_SYSTEM_PROMPT = """
Manage a swarm of worker agents to efficiently serve the user by deciding whether to create new agents or delegate tasks. Ensure operations are efficient and effective.

### Instructions:

1. **Task Assignment**:
   - Analyze available worker agents when a task is presented.
   - Delegate tasks to existing agents with clear, direct, and actionable instructions if an appropriate agent is available.
   - If no suitable agent exists, create a new agent with a fitting system prompt to handle the task.

2. **Agent Creation**:
   - Name agents according to the task they are intended to perform (e.g., "Twitter Marketing Agent").
   - Provide each new agent with a concise and clear system prompt that includes its role, objectives, and any tools it can utilize.

3. **Efficiency**:
   - Minimize redundancy and maximize task completion speed.
   - Avoid unnecessary agent creation if an existing agent can fulfill the task.

4. **Communication**:
   - Be explicit in task delegation instructions to avoid ambiguity and ensure effective task execution.
   - Require agents to report back on task completion or encountered issues.

5. **Reasoning and Decisions**:
   - Offer brief reasoning when selecting or creating agents to maintain transparency.
   - Avoid using an agent if unnecessary, with a clear explanation if no agents are suitable for a task.

# Output Format

Present your plan in clear, bullet-point format or short concise paragraphs, outlining task assignment, agent creation, efficiency strategies, and communication protocols.

# Notes

- Preserve transparency by always providing reasoning for task-agent assignments and creation.
- Ensure instructions to agents are unambiguous to minimize error.

"""


class AutoSwarmBuilder:
    """A class that automatically builds and manages swarms of AI agents.

    This class handles the creation, coordination and execution of multiple AI agents working
    together as a swarm to accomplish complex tasks. It uses a boss agent to delegate work
    and create new specialized agents as needed.

    Args:
        name (str): The name of the swarm
        description (str): A description of the swarm's purpose
        verbose (bool, optional): Whether to output detailed logs. Defaults to True.
        max_loops (int, optional): Maximum number of execution loops. Defaults to 1.
    """

    def __init__(
        self,
        name: str = None,
        description: str = None,
        verbose: bool = True,
        max_loops: int = 1,
    ):
        self.name = name
        self.description = description
        self.verbose = verbose
        self.max_loops = max_loops
        self.agents_pool = []
        logger.info(
            f"Initialized AutoSwarmBuilder: {name} {description}"
        )

    # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
    def run(self, task: str, image_url: str = None, *args, **kwargs):
        """Run the swarm on a given task.

        Args:
            task (str): The task to be accomplished
            image_url (str, optional): URL of an image input if needed. Defaults to None.
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments

        Returns:
            The output from the swarm's execution
        """
        logger.info(f"Running swarm on task: {task}")
        agents = self._create_agents(task, image_url, *args, **kwargs)
        logger.info(f"Agents created {len(agents)}")
        logger.info("Routing task through swarm")
        output = self.swarm_router(agents, task, image_url)
        logger.info(f"Swarm execution complete with output: {output}")
        return output

    # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
    def _create_agents(self, task: str, *args, **kwargs):
        """Create the necessary agents for a task.

        Args:
            task (str): The task to create agents for
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments

        Returns:
            list: List of created agents
        """
        logger.info("Creating agents for task")
        model = OpenAIFunctionCaller(
            system_prompt=BOSS_SYSTEM_PROMPT,
            api_key=os.getenv("OPENAI_API_KEY"),
            temperature=0.1,
            base_model=SwarmConfig,
        )

        agents_dictionary = model.run(task)
        logger.info(f"Agents dictionary: {agents_dictionary}")

        # Convert dictionary to SwarmConfig if needed
        if isinstance(agents_dictionary, dict):
            agents_dictionary = SwarmConfig(**agents_dictionary)

        # Set swarm config
        self.name = agents_dictionary.name
        self.description = agents_dictionary.description
        self.max_loops = getattr(
            agents_dictionary, "max_loops", 1
        )  # Default to 1 if not set

        logger.info(
            f"Swarm config: {self.name}, {self.description}, {self.max_loops}"
        )

        # Create agents from config
        agents = []
        for agent_config in agents_dictionary.agents:
            # Convert dict to AgentConfig if needed
            if isinstance(agent_config, dict):
                agent_config = AgentConfig(**agent_config)

            agent = self.build_agent(
                agent_name=agent_config.name,
                agent_description=agent_config.description,
                agent_system_prompt=agent_config.system_prompt,
                max_loops=agent_config.max_loops,
            )
            agents.append(agent)

        return agents

    def build_agent(
        self,
        agent_name: str,
        agent_description: str,
        agent_system_prompt: str,
        max_loops: int = 1,
    ):
        """Build a single agent with the given specifications.

        Args:
            agent_name (str): Name of the agent
            agent_description (str): Description of the agent's purpose
            agent_system_prompt (str): The system prompt for the agent
            max_loops (int, optional): Maximum number of loops the agent may run. Defaults to 1.

        Returns:
            Agent: The constructed agent instance
        """
        logger.info(f"Building agent: {agent_name}")
        agent = Agent(
            agent_name=agent_name,
            description=agent_description,
            system_prompt=agent_system_prompt,
            llm=model,
            max_loops=max_loops,
            autosave=True,
            dashboard=False,
            verbose=True,
            dynamic_temperature_enabled=True,
            saved_state_path=f"{agent_name}.json",
            user_name="swarms_corp",
            retry_attempts=1,
            context_length=200000,
            return_step_meta=False,
            output_type="str",  # other supported values: "json", "dict", "csv"; "yaml" planned
            streaming_on=False,
            auto_generate_prompt=True,
        )

        return agent

    # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
    def swarm_router(
        self,
        agents: List[Agent],
        task: str,
        image_url: str = None,
        *args,
        **kwargs,
    ):
        """Route tasks between agents in the swarm.

        Args:
            agents (List[Agent]): List of available agents
            task (str): The task to route
            image_url (str, optional): URL of an image input if needed. Defaults to None.
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments

        Returns:
            The output from the routed task execution
        """
        logger.info("Routing task through swarm")
        swarm_router_instance = SwarmRouter(
            agents=agents,
            swarm_type="auto",
            max_loops=1,
        )

        return swarm_router_instance.run(
            self.name + " " + self.description + " " + task,
        )


example = AutoSwarmBuilder()

print(
    example.run(
        "Write multiple blog posts about the latest advancements in swarm intelligence all at once"
    )
)
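Because _create_agents leans on pydantic validation when it coerces the function-caller output into SwarmConfig, a small sketch of that coercion step in isolation may help. It assumes it is run in the same module as the SwarmConfig and AgentConfig models above; the dictionary contents are illustrative:

# Illustrative only: the dict -> SwarmConfig coercion that _create_agents performs.
raw = {
    "name": "Research-Writing-Swarm",
    "description": "Researches a topic, then writes it up",
    "agents": [
        {
            "name": "Research-Agent",
            "description": "Gathers information",
            "system_prompt": "You are a research agent...",
            "max_loops": 2,
        }
    ],
    "max_loops": 1,
}

config = SwarmConfig(**raw)  # nested dicts are validated into AgentConfig instances
print(config.agents[0].name, config.agents[0].max_loops)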
@ -1,214 +0,0 @@
import hashlib
from time import time_ns
from typing import Callable, List, Optional, Sequence, Union

from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger
from swarms.structs.base_swarm import BaseSwarm


def _hash(input: str):
    """
    Hashes the input string using SHA256 algorithm.

    Args:
        input (str): The string to be hashed.

    Returns:
        str: The hexadecimal representation of the hash value.
    """
    hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
    return hex_dig


def msg_hash(
    agent: Agent, content: str, turn: int, msg_type: str = "text"
):
    """
    Generate a hash value for a message.

    Args:
        agent (Agent): The agent sending the message.
        content (str): The content of the message.
        turn (int): The turn number of the message.
        msg_type (str, optional): The type of the message. Defaults to "text".

    Returns:
        int: The hash value of the message.
    """
    time = time_ns()
    return _hash(
        f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
        f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
    )


class MessagePool(BaseSwarm):
    """
    A class representing a message pool for agents in a swarm.

    Attributes:
        agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
        moderator (Optional[Agent]): The moderator agent.
        turns (Optional[int]): The number of turns.
        routing_function (Optional[Callable]): The routing function for message distribution.
        show_names (Optional[bool]): Flag indicating whether to show agent names.
        messages (List[Dict]): The list of messages in the pool.

    Examples:
        >>> from swarms.structs.agent import Agent
        >>> from swarms.structs.message_pool import MessagePool
        >>> agent1 = Agent(agent_name="agent1")
        >>> agent2 = Agent(agent_name="agent2")
        >>> agent3 = Agent(agent_name="agent3")
        >>> moderator = Agent(agent_name="moderator")
        >>> agents = [agent1, agent2, agent3]
        >>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
        >>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
        >>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
        >>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
        >>> message_pool.get_all_messages()
        [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
        >>> message_pool.get_visible_messages(agent=agent1, turn=1)
        [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
        >>> message_pool.get_visible_messages(agent=agent2, turn=1)
        [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
    """

    def __init__(
        self,
        agents: Optional[Sequence[Agent]] = None,
        moderator: Optional[Agent] = None,
        turns: Optional[int] = 5,
        routing_function: Optional[Callable] = None,
        show_names: Optional[bool] = False,
        autosave: Optional[bool] = False,
        *args,
        **kwargs,
    ):
        super().__init__()

        self.agent = agents
        self.moderator = moderator
        self.turns = turns
        self.routing_function = routing_function
        self.show_names = show_names
        self.autosave = autosave

        self.messages = []

        logger.info("MessagePool initialized")
        logger.info(f"Number of agents: {len(agents)}")
        logger.info(
            f"Agents: {[agent.agent_name for agent in agents]}"
        )
        logger.info(f"moderator: {moderator.agent_name} is available")
        logger.info(f"Number of turns: {turns}")

    def add(
        self,
        agent: Agent,
        content: str,
        turn: int,
        visible_to: Union[str, List[str]] = "all",
        logged: bool = True,
    ):
        """
        Add a message to the pool.

        Args:
            agent (Agent): The agent sending the message.
            content (str): The content of the message.
            turn (int): The turn number.
            visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
            logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True.
        """

        self.messages.append(
            {
                "agent": agent,
                "content": content,
                "turn": turn,
                "visible_to": visible_to,
                "logged": logged,
            }
        )
        logger.info(f"Message added: {content}")

    def reset(self):
        """
        Reset the message pool.
        """
        self.messages = []
        logger.info("MessagePool reset")

    def last_turn(self):
        """
        Get the last turn number.

        Returns:
            int: The last turn number.
        """
        if len(self.messages) == 0:
            return 0
        else:
            return self.messages[-1]["turn"]

    @property
    def last_message(self):
        """
        Get the last message in the pool.

        Returns:
            dict: The last message.
        """
        if len(self.messages) == 0:
            return None
        else:
            return self.messages[-1]

    def get_all_messages(self):
        """
        Get all messages in the pool.

        Returns:
            List[Dict]: The list of all messages.
        """
        return self.messages

    def get_visible_messages(self, agent: Agent, turn: int):
        """
        Get the visible messages for a given agent and turn.

        Args:
            agent (Agent): The agent.
            turn (int): The turn number.

        Returns:
            List[Dict]: The list of visible messages.
        """
        # Get the messages before the current turn
        prev_messages = [
            message
            for message in self.messages
            if message["turn"] < turn
        ]

        visible_messages = []
        for message in prev_messages:
            if (
                message["visible_to"] == "all"
                or agent.agent_name in message["visible_to"]
            ):
                visible_messages.append(message)
        return visible_messages

    # def query(self, query: str):
    #     """
    #     Query a message from the messages list and then pass it to the moderator
    #     """
    #     return [
    #         (mod, content)
    #         for mod, content, _ in self.messages  # Add an underscore to ignore the rest of the elements
    #         if query in content
    #     ]
@ -1,16 +0,0 @@
def log_agent_data(data: dict):
    import requests

    data_dict = {
        "data": data,
    }

    url = "https://swarms.world/api/get-agents/log-agents"
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
    }

    response = requests.post(url, json=data_dict, headers=headers)

    return response.json()
@ -0,0 +1,141 @@
from typing import Any, List, Optional, Union
from pathlib import Path
from loguru import logger
from doc_master import doc_master
from concurrent.futures import ThreadPoolExecutor, as_completed
from tenacity import retry, stop_after_attempt, wait_exponential


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def _process_document(doc_path: Union[str, Path]) -> str:
    """Safely process a single document with retries.

    Args:
        doc_path: Path to the document to process

    Returns:
        Processed document text

    Raises:
        Exception: If document processing fails after retries
    """
    try:
        return doc_master(
            file_path=str(doc_path), output_type="string"
        )
    except Exception as e:
        logger.error(
            f"Error processing document {doc_path}: {str(e)}"
        )
        raise


def handle_input_docs(
    agents: Any,
    docs: Optional[List[Union[str, Path]]] = None,
    doc_folder: Optional[Union[str, Path]] = None,
    max_workers: int = 4,
    chunk_size: int = 1000000,
) -> Any:
    """
    Add document content to agent prompts with improved reliability and performance.

    Args:
        agents: Dictionary mapping agent names to Agent objects
        docs: List of document paths
        doc_folder: Path to folder containing documents
        max_workers: Maximum number of parallel document processing workers
        chunk_size: Maximum characters to process at once to avoid memory issues

    Returns:
        The agents mapping with document content appended to each system prompt,
        or None if no agents or no documents were provided

    Raises:
        RuntimeError: If document or folder processing, or a prompt update, fails
    """
    if not agents:
        logger.warning(
            "No agents provided, skipping document distribution"
        )
        return

    if not docs and not doc_folder:
        logger.warning(
            "No documents or folder provided, skipping document distribution"
        )
        return

    logger.info("Starting document distribution to agents")

    try:
        processed_docs = []

        # Process individual documents in parallel
        if docs:
            with ThreadPoolExecutor(
                max_workers=max_workers
            ) as executor:
                future_to_doc = {
                    executor.submit(_process_document, doc): doc
                    for doc in docs
                }

                for future in as_completed(future_to_doc):
                    doc = future_to_doc[future]
                    try:
                        processed_docs.append(future.result())
                    except Exception as e:
                        logger.error(
                            f"Failed to process document {doc}: {str(e)}"
                        )
                        raise RuntimeError(
                            f"Document processing failed: {str(e)}"
                        )

        # Process folder if specified
        elif doc_folder:
            try:
                folder_content = doc_master(
                    folder_path=str(doc_folder), output_type="string"
                )
                processed_docs.append(folder_content)
            except Exception as e:
                logger.error(
                    f"Failed to process folder {doc_folder}: {str(e)}"
                )
                raise RuntimeError(
                    f"Folder processing failed: {str(e)}"
                )

        # Combine and chunk the processed documents
        combined_data = "\n".join(processed_docs)

        # Update agent prompts in chunks to avoid memory issues
        for agent in agents.values():
            try:
                for i in range(0, len(combined_data), chunk_size):
                    chunk = combined_data[i : i + chunk_size]
                    if i == 0:
                        agent.system_prompt += (
                            "\nDocuments:\n" + chunk
                        )
                    else:
                        agent.system_prompt += chunk
            except Exception as e:
                logger.error(
                    f"Failed to update agent prompt: {str(e)}"
                )
                raise RuntimeError(
                    f"Agent prompt update failed: {str(e)}"
                )

        logger.info(
            f"Successfully added documents to {len(agents)} agents"
        )

        return agents

    except Exception as e:
        logger.error(f"Document distribution failed: {str(e)}")
        raise RuntimeError(f"Document distribution failed: {str(e)}")
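A minimal sketch of how handle_input_docs might be called, assuming doc_master is installed and the mapped values expose a mutable system_prompt attribute; the stand-in class and file paths below are illustrative, not part of this change:

from dataclasses import dataclass

# Illustrative only: a minimal stand-in with the attribute handle_input_docs mutates;
# in real use these would be swarms Agent instances.
@dataclass
class _PromptHolder:
    system_prompt: str = "You are a helpful analyst."

agents_by_name = {
    "Research-Agent": _PromptHolder(),
    "Writing-Agent": _PromptHolder(),
}

updated = handle_input_docs(
    agents=agents_by_name,
    docs=["./data/q3_report.pdf", "./data/meeting_notes.txt"],  # hypothetical paths
    max_workers=2,
)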
@ -0,0 +1,102 @@
from typing import Union, Dict, List, Tuple, Any


def any_to_str(data: Union[str, Dict, List, Tuple, Any]) -> str:
    """Convert any input data type to a nicely formatted string.

    This function handles conversion of various Python data types into a clean string representation.
    It recursively processes nested data structures and handles None values gracefully.

    Args:
        data: Input data of any type to convert to string. Can be:
            - Dictionary
            - List/Tuple
            - String
            - None
            - Any other type that can be converted via str()

    Returns:
        str: A formatted string representation of the input data.
            - Dictionaries are rendered as newline-separated "key: value" lines
            - Lists/tuples are bracketed and comma-separated, with strings quoted
            - None is rendered as "None"
            - Other types are converted using str()

    Examples:
        >>> any_to_str({'a': 1, 'b': 2})
        'a: 1\\nb: 2'
        >>> any_to_str([1, 2, 3])
        '[1, 2, 3]'
        >>> any_to_str(None)
        'None'
    """
    try:
        if isinstance(data, dict):
            # Format dictionary as newline-separated "key: value" lines
            items = []
            for k, v in data.items():
                value = any_to_str(v)
                items.append(f"{k}: {value}")
            return "\n".join(items)

        elif isinstance(data, (list, tuple)):
            # Format sequences with brackets and proper spacing
            items = [any_to_str(x) for x in data]
            if len(items) == 0:
                return "[]" if isinstance(data, list) else "()"
            return (
                f"[{', '.join(items)}]"
                if isinstance(data, list)
                else f"({', '.join(items)})"
            )

        elif data is None:
            return "None"

        else:
            # Handle strings and other types
            if isinstance(data, str):
                return f'"{data}"'
            return str(data)

    except Exception as e:
        return f"Error converting data: {str(e)}"


def main():
    # Example 1: Dictionary
    print("Dictionary:")
    print(
        any_to_str(
            {
                "name": "John",
                "age": 30,
                "hobbies": ["reading", "hiking"],
            }
        )
    )

    print("\nNested Dictionary:")
    print(
        any_to_str(
            {
                "user": {
                    "id": 123,
                    "details": {"city": "New York", "active": True},
                },
                "data": [1, 2, 3],
            }
        )
    )

    print("\nList and Tuple:")
    print(any_to_str([1, "text", None, (1, 2)]))
    print(any_to_str((True, False, None)))

    print("\nEmpty Collections:")
    print(any_to_str([]))
    print(any_to_str({}))


if __name__ == "__main__":
    main()
@ -1,127 +0,0 @@
import time
from os import cpu_count
from typing import Any, Callable, List, Optional

from loguru import logger
from pathos.multiprocessing import ProcessingPool as Pool


from typing import Tuple


def execute_parallel_optimized(
    callables_with_args: List[
        Tuple[Callable[..., Any], Tuple[Any, ...]]
    ],
    max_workers: Optional[int] = None,
    chunk_size: Optional[int] = None,
    retries: int = 3,
    **kwargs,
) -> List[Any]:
    """
    Executes a list of callables in parallel, leveraging all available CPU cores.

    This function is optimized for high performance and reliability.

    Args:
        callables_with_args (List[Tuple[Callable[..., Any], Tuple[Any, ...]]]):
            A list of tuples, where each tuple contains a callable and a tuple of its arguments.
        max_workers (Optional[int]): The maximum number of workers to use. Defaults to the number of available cores.
        chunk_size (Optional[int]): The size of chunks to split the tasks into for balanced execution. Defaults to automatic chunking.
        retries (int): Number of retries for a failed task. Default is 3.

    Returns:
        List[Any]: A list of results from each callable. The order corresponds to the order of the input list.

    Raises:
        Exception: Any exception raised by the callable will be logged and re-raised after retries are exhausted.
    """
    max_workers = cpu_count() if max_workers is None else max_workers
    results = []
    logger.info(
        f"Starting optimized parallel execution of {len(callables_with_args)} tasks."
    )

    pool = Pool(
        nodes=max_workers, **kwargs
    )  # Initialize the pool once

    def _execute_with_retry(callable_, args, retries):
        attempt = 0
        while attempt < retries:
            try:
                result = callable_(*args)
                logger.info(
                    f"Task {callable_} with args {args} completed successfully."
                )
                return result
            except Exception as e:
                attempt += 1
                logger.warning(
                    f"Task {callable_} with args {args} failed on attempt {attempt}: {e}"
                )
                time.sleep(1)  # Small delay before retrying
                if attempt >= retries:
                    logger.error(
                        f"Task {callable_} with args {args} failed after {retries} retries."
                    )
                    raise

    try:
        if chunk_size is None:
            chunk_size = (
                len(callables_with_args)
                // (max_workers or pool.ncpus)
                or 1
            )

        # Use chunking and mapping for efficient execution
        results = pool.map(
            lambda item: _execute_with_retry(
                item[0], item[1], retries
            ),
            callables_with_args,
            chunksize=chunk_size,
        )

    except Exception as e:
        logger.critical(
            f"Parallel execution failed due to an error: {e}"
        )
        raise

    logger.info(
        f"Optimized parallel execution completed. {len(results)} tasks executed."
    )
    pool.close()  # Ensure pool is properly closed
    pool.join()


    # return results


# def add(a, b):
#     return a + b


# def multiply(a, b):
#     return a * b


# def power(a, b):
#     return a**b


# # if __name__ == "__main__":
# #     # List of callables with their respective arguments
# #     callables_with_args = [
# #         (add, (2, 3)),
# #         (multiply, (5, 4)),
# #         (power, (2, 10)),
# #     ]

# #     # Execute the callables in parallel
# #     results = execute_parallel_optimized(callables_with_args)

# #     # Print the results
# #     print("Results:", results)
@ -1,75 +0,0 @@
from loguru import logger
import sys
import platform
import os
import datetime

# Configuring loguru to log to both the console and a file
logger.remove()  # Remove default logger configuration
logger.add(
    sys.stderr,
    level="INFO",
    format="<green>{time}</green> - <level>{level}</level> - <level>{message}</level>",
)

logger.add(
    "info.log", level="INFO", format="{time} - {level} - {message}"
)


def log_success_message() -> None:
    """
    Logs a success message with instructions for sharing agents on the Swarms Agent Explorer and joining the community for assistance.

    Returns:
        None

    Raises:
        None
    """
    # Gather extensive context information
    context_info = {
        "timestamp": datetime.datetime.now().isoformat(),
        "python_version": platform.python_version(),
        "platform": platform.platform(),
        "machine": platform.machine(),
        "processor": platform.processor(),
        "user": os.getenv("USER") or os.getenv("USERNAME"),
        "current_working_directory": os.getcwd(),
    }

    success_message = (
        f"\n"
        f"#########################################\n"
        f"#                                       #\n"
        f"#        SUCCESSFUL RUN DETECTED!       #\n"
        f"#                                       #\n"
        f"#########################################\n"
        f"\n"
        f"Your task completed successfully!\n"
        f"\n"
        f"Context Information:\n"
        f"-----------------------------------------\n"
        f"Timestamp: {context_info['timestamp']}\n"
        f"Python Version: {context_info['python_version']}\n"
        f"Platform: {context_info['platform']}\n"
        f"Machine: {context_info['machine']}\n"
        f"Processor: {context_info['processor']}\n"
        f"User: {context_info['user']}\n"
        f"Current Working Directory: {context_info['current_working_directory']}\n"
        f"-----------------------------------------\n"
        f"\n"
        f"Share your agents on the Swarms Agent Explorer with friends:\n"
        f"https://swarms.world/platform/explorer\n"
        f"\n"
        f"Join the Swarms community if you want assistance or help debugging:\n"
        f"https://discord.gg/uzu63HQx\n"
        f"\n"
        f"#########################################\n"
    )

    logger.info(success_message)


# Example usage:
# log_success_message()
@ -0,0 +1,34 @@
from typing import Union, Dict, List
from swarms.artifacts.main_artifact import Artifact


def handle_artifact_outputs(
    file_path: str,
    data: Union[str, Dict, List],
    output_type: str = "txt",
    folder_path: str = "./artifacts",
) -> str:
    """
    Handle different types of data and create files in various formats.

    Args:
        file_path: Path where the file should be saved
        data: Input data that can be string, dict or list
        output_type: Type of output file (txt, md, pdf, csv, json)
        folder_path: Folder to save artifacts

    Returns:
        str: Path to the created file
    """
    # Create artifact with appropriate file type
    artifact = Artifact(
        folder_path=folder_path,
        file_path=file_path,
        file_type=output_type,
        contents=data,
        edit_count=0,
    )

    # Save the file
    # artifact.save()
    artifact.save_as(output_format=output_type)

    # Return the path promised by the signature and docstring
    return file_path
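A short sketch of calling handle_artifact_outputs, assuming the Artifact model accepts a dict for contents and that "json" is a supported output format of save_as; the payload and file names are illustrative:

# Illustrative only: persist a small dict as a JSON artifact.
summary = {"tasks_completed": 42, "errors": 0}

saved_path = handle_artifact_outputs(
    file_path="run_summary.json",  # hypothetical file name
    data=summary,
    output_type="json",
    folder_path="./artifacts",
)
print(saved_path)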
@ -0,0 +1,78 @@
from loguru import logger
from typing import List, Union, Callable, Optional
from swarms.structs.agent import Agent


def reliability_check(
    agents: List[Union[Agent, Callable]],
    max_loops: int,
    name: Optional[str] = None,
    description: Optional[str] = None,
    flow: Optional[str] = None,
) -> None:
    """
    Performs reliability checks on swarm configuration parameters.

    Args:
        agents: List of Agent objects or callables that will be executed
        max_loops: Maximum number of execution loops
        name: Name identifier for the swarm
        description: Description of the swarm's purpose
        flow: Flow definition describing how the swarm is wired together

    Raises:
        ValueError: If any parameters fail validation checks
        TypeError: If parameters are of incorrect type
    """
    logger.info("Initializing swarm reliability checks")

    # Type checking
    if not isinstance(agents, list):
        raise TypeError("agents parameter must be a list")

    if not isinstance(max_loops, int):
        raise TypeError("max_loops must be an integer")

    # Validate agents
    if not agents:
        raise ValueError("Agents list cannot be empty")

    for i, agent in enumerate(agents):
        if not isinstance(agent, (Agent, Callable)):
            raise TypeError(
                f"Agent at index {i} must be an Agent instance or Callable"
            )

    # Validate max_loops
    if max_loops <= 0:
        raise ValueError("max_loops must be greater than 0")

    if max_loops > 1000:
        logger.warning(
            "Large max_loops value detected. This may impact performance."
        )

    # Validate name
    if name is None:
        raise ValueError("name parameter is required")
    if not isinstance(name, str):
        raise TypeError("name must be a string")
    if len(name.strip()) == 0:
        raise ValueError("name cannot be empty or just whitespace")

    # Validate description
    if description is None:
        raise ValueError("description parameter is required")
    if not isinstance(description, str):
        raise TypeError("description must be a string")
    if len(description.strip()) == 0:
        raise ValueError(
            "description cannot be empty or just whitespace"
        )

    # Validate flow
    if flow is None:
        raise ValueError("flow parameter is required")
    if not isinstance(flow, str):
        raise TypeError("flow must be a string")

    logger.info("All reliability checks passed successfully")
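A minimal sketch of what this validator accepts, using a plain callable as a stand-in for an agent; the names and flow string below are illustrative:

# Illustrative only: the check passes silently when the configuration is sane
# and raises ValueError/TypeError otherwise.
reliability_check(
    agents=[lambda task: f"echo: {task}"],  # callables are accepted alongside Agent instances
    max_loops=2,
    name="demo-swarm",
    description="A throwaway configuration used to exercise the validator",
    flow="sequential",  # hypothetical flow string; only its presence and type are checked
)

try:
    reliability_check(agents=[], max_loops=1, name=None, description=None, flow=None)
except (ValueError, TypeError) as err:
    print(f"rejected: {err}")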
@ -0,0 +1,77 @@
import os
from typing import Any

from clusterops import (
    execute_on_gpu,
    execute_on_multiple_gpus,
    execute_with_cpu_cores,
    list_available_gpus,
)
from loguru import logger


def exec_callable_with_clusterops(
    device: str = "cpu",
    device_id: int = 0,
    all_cores: bool = True,
    all_gpus: bool = False,
    func: callable = None,
    *args,
    **kwargs,
) -> Any:
    """
    Executes a given function on a specified device, either CPU or GPU.

    This method attempts to execute a given function on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.

    Args:
        device (str, optional): The device to use for execution. Defaults to "cpu".
        device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
        all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
        all_gpus (bool, optional): If True, uses all available GPUs. Defaults to False.
        func (callable): The function to execute.
        *args: Additional positional arguments to be passed to the execution method.
        **kwargs: Additional keyword arguments to be passed to the execution method.

    Returns:
        Any: The result of the execution.

    Raises:
        ValueError: If an invalid device is specified.
        Exception: If any other error occurs during execution.
    """
    try:
        logger.info(f"Attempting to run on device: {device}")
        if device == "cpu":
            logger.info("Device set to CPU")
            if all_cores is True:
                count = os.cpu_count()
                logger.info(f"Using all available CPU cores: {count}")
            else:
                count = device_id
                logger.info(f"Using specific CPU core: {count}")

            return execute_with_cpu_cores(
                count, func, *args, **kwargs
            )

        # If device is GPU, check the all-GPUs case first so it is reachable
        elif device == "gpu" and all_gpus is True:
            logger.info("Device set to GPU and running all gpus")
            gpus = [int(gpu) for gpu in list_available_gpus()]
            return execute_on_multiple_gpus(
                gpus, func, *args, **kwargs
            )
        elif device == "gpu":
            logger.info("Device set to GPU")
            return execute_on_gpu(device_id, func, *args, **kwargs)
        else:
            raise ValueError(
                f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'."
            )
    except ValueError as e:
        logger.error(f"Invalid device specified: {e}")
        raise e
    except Exception as e:
        logger.error(f"An error occurred during execution: {e}")
        raise e
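A brief sketch of invoking the wrapper on CPU, assuming the clusterops package is installed and that it forwards the trailing positional arguments to the worker, as the wrapper's own signature suggests; the worker function is illustrative:

# Illustrative only: run a trivial worker across all available CPU cores.
def _square(x: int) -> int:
    return x * x

result = exec_callable_with_clusterops(
    "cpu",      # device
    0,          # device_id (ignored when all_cores=True)
    True,       # all_cores
    False,      # all_gpus
    _square,    # func
    12,         # forwarded to _square via *args
)
print(result)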
@ -1,117 +0,0 @@
from swarm_models import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.message_pool import MessagePool


def test_message_pool_initialization():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    agent2 = Agent(llm=OpenAIChat(), agent_name="agent1")
    moderator = Agent(llm=OpenAIChat(), agent_name="agent1")
    agents = [agent1, agent2]
    message_pool = MessagePool(
        agents=agents, moderator=moderator, turns=5
    )

    assert message_pool.agent == agents
    assert message_pool.moderator == moderator
    assert message_pool.turns == 5
    assert message_pool.messages == []


def test_message_pool_add():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.messages == [
        {
            "agent": agent1,
            "content": "Hello, world!",
            "turn": 1,
            "visible_to": "all",
            "logged": True,
        }
    ]


def test_message_pool_reset():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)
    message_pool.reset()

    assert message_pool.messages == []


def test_message_pool_last_turn():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.last_turn() == 1


def test_message_pool_last_message():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.last_message == {
        "agent": agent1,
        "content": "Hello, world!",
        "turn": 1,
        "visible_to": "all",
        "logged": True,
    }


def test_message_pool_get_all_messages():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.get_all_messages() == [
        {
            "agent": agent1,
            "content": "Hello, world!",
            "turn": 1,
            "visible_to": "all",
            "logged": True,
        }
    ]


def test_message_pool_get_visible_messages():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    agent2 = Agent(agent_name="agent2")
    message_pool = MessagePool(
        agents=[agent1, agent2], moderator=agent1, turns=5
    )
    message_pool.add(
        agent=agent1,
        content="Hello, agent2!",
        turn=1,
        visible_to=[agent2.agent_name],
    )

    assert message_pool.get_visible_messages(
        agent=agent2, turn=2
    ) == [
        {
            "agent": agent1,
            "content": "Hello, agent2!",
            "turn": 1,
            "visible_to": [agent2.agent_name],
            "logged": True,
        }
    ]