pull/584/head
parent fb62dda1da
commit 13f45be8ad

@ -38,15 +38,7 @@
1. **Scalability and Efficiency:** Orchestrate thousands of agents to automate complex business processes, saving time and boosting operational efficiency.
2. **Framework-Agnostic Integration:** Seamlessly integrate with any agent framework, including LangChain, AutoGen, and more, allowing for flexible deployment without infrastructure changes.
3. **Production-Grade Function Calling:** Execute complex functions with high accuracy and low latency, ensuring reliability in mission-critical production environments.
4. **Centralized Swarm Management:** Manage massive swarms of agents with centralized dashboards, real-time analytics, and detailed logging to maintain control and optimize performance.
Swarms is an enterprise-grade, production-ready multi-agent collaboration framework that enables you to orchestrate many agents working collaboratively at scale to automate real-world activities.
----
@ -67,7 +59,7 @@ $ pip3 install -U swarms
# Usage Examples 🤖
Here are some simple examples; more comprehensive documentation is available in our [docs here](https://docs.swarms.world/en/latest/)
Run example in Colab: <a target="_blank" href="https://colab.research.google.com/github/kyegomez/swarms/blob/master/examples/collab/swarms_example.ipynb">
Run example in Colab: <a target="_blank" href="https://colab.research.google.com/github/kyegomez/swarms/blob/master/examples/collabs/swarms_example.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
@ -90,13 +82,16 @@ from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from dotenv import load_dotenv
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
@ -113,7 +108,8 @@ agent = Agent(
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=False
return_step_meta=False,
# output_type="json",
)
@ -351,9 +347,6 @@ agent.tokens_checks()
# Print the dashboard of the agent
agent.print_dashboard()
# Print the history and memory of the agent
agent.print_history_and_memory()
# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()
@ -668,6 +661,7 @@ workflow.run(
Inspired by Einops and einsum, this orchestration technique enables you to map out the relationships between various agents. For example, you can specify linear, sequential relationships like `a -> a1 -> a2 -> a3` or concurrent relationships where the first agent sends a message to three agents at once: `a -> a1, a2, a3`. You can customize your workflow to mix sequential and concurrent relationships. [Docs Available:](https://swarms.apac.ai/en/latest/swarms/structs/agent_rearrange/)
```python
from swarms import Agent, AgentRearrange, Anthropic
@ -948,7 +942,7 @@ swarm = SpreadSheetSwarm(
autosave_on=True,
save_file_path="real_estate_marketing_spreadsheet.csv",
run_all_agents=False,
repeat_count=2,
max_loops=2,
)
# Run the swarm

@ -0,0 +1,100 @@
import os
from swarms import Agent, OpenAIChat
from swarms.structs.company import Company
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the boss agent (Director)
boss_agent = Agent(
agent_name="BossAgent",
system_prompt="""
You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses.
Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently.
After receiving a report on the company's expenses, you will break down the work into smaller tasks,
assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures,
and identifying unnecessary transactions. Ensure the results are communicated back in a structured way
so the finance team can take actionable steps to cut off unproductive spending. You also monitor and
dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings
into a coherent report.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="boss_agent.json",
)
# Initialize worker 1: Expense Analyzer
worker1 = Agent(
agent_name="ExpenseAnalyzer",
system_prompt="""
Your task is to carefully analyze the company's expense data provided to you.
You will focus on identifying high-cost recurring transactions, categorizing expenditures
(e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending.
You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting.
Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker1.json",
)
# Initialize worker 2: Summary Generator
worker2 = Agent(
agent_name="SummaryGenerator",
system_prompt="""
After receiving the detailed breakdown from the ExpenseAnalyzer,
your task is to create a concise summary of the findings. You will focus on the most actionable insights,
such as highlighting the specific transactions that can be immediately cut off and summarizing the areas
where the company is overspending. Your summary will be used by the BossAgent to generate the final report.
Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker2.json",
)
# Swarm-Level Prompt (Collaboration Prompt)
swarm_prompt = """
As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off.
You will work collaboratively to break down the entire process of expense analysis into manageable steps.
The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first
focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them,
and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then
consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses.
Together, your collaboration is essential to streamlining and improving the company's financial health.
"""
# Create a list of agents
agents = [boss_agent, worker1, worker2]
# Create an organization chart
org_chart = [[boss_agent], [worker1, worker2]]
# Create a company
company = Company(org_chart=org_chart)
# Run the company
company.run()


@ -375,7 +375,7 @@ from dotenv import load_dotenv
from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message
from swarms.memory.base_vectordb import AbstractVectorDatabase
from swarms_memory import AbstractVectorDatabase
# Results storage using local ChromaDB

@ -0,0 +1,56 @@
# Careers at Swarms
We are a team of engineers, developers, and visionaries on a mission to build the future of AI by orchestrating multi-agent collaboration. We move fast, think ambitiously, and deliver with urgency. Join us if you want to be part of building the next generation of multi-agent systems, redefining how businesses automate operations and leverage AI.
**We offer none of the following benefits yet:**
- No medical, dental, or vision insurance
- No paid time off
- No life or AD&D insurance
- No short-term or long-term disability insurance
- No 401(k) plan
**Working hours:** 9 AM to 10 PM, 7 days a week. This is not for people who seek work-life balance.
---
### Hiring Process: How to Join Swarms
We have a simple 3-step hiring process:
**NOTE:** We only consider applicants who have previously submitted a PR. To be considered, you must submit a PR containing a new feature or a bug fix.
1. **Submit a pull request (PR)**: Start by submitting an approved PR to the [Swarms GitHub repository](https://github.com/kyegomez/swarms) or the appropriate repository.
2. **Code review**: Our technical team will review your PR. If it meets our standards, you will be invited for a quick interview.
3. **Final interview**: Discuss your contributions and approach with our team. If you pass, you're in!
There are no recruiters. All evaluations are done by our technical team.
---
### Location
- **Palo Alto, CA:** Our Palo Alto office houses the majority of our core research teams, including prompting, agent design, and model training.
- **Miami, FL:** Our Miami office focuses on prompt engineering, agent design, and more.
### Open Roles at Swarms
**Infrastructure Engineer**
- Build and maintain the systems that run our AI multi-agent infrastructure.
- Expertise in Skypilot, AWS, Terraform.
- Ensure seamless, high-availability environments for agent operations.
**Agent Engineer**
- Design, develop, and orchestrate complex swarms of AI agents.
- Extensive experience with Python, multi-agent systems, and neural networks.
- Ability to create dynamic and efficient agent architectures from scratch.
**Prompt Engineer**
- Craft highly optimized prompts that drive our LLM-based agents.
- Specialize in instruction-based prompts, multi-shot examples, and production-grade deployment.
- Collaborate with agents to deliver state-of-the-art solutions.
**Front-End Engineer**
- Build sleek, intuitive interfaces for interacting with swarms of agents.
- Proficiency in Next.js, FastAPI, and modern front-end technologies.
- Design with the user experience in mind, integrating complex AI features into simple workflows.


@ -19,7 +19,7 @@ Here you'll find references about the Swarms framework, marketplace, community,
| Community | [Discord](https://discord.com/servers/agora-999382051935506503) |
| Blog | [Blog](https://medium.com/@kyeg) |
| Social Media | [Twitter](https://x.com/swarms_corp) |
| Event Calendar | [Twitter](https://lu.ma/user/usr-GPa2xaC3pL1Hunp) |
| Event Calendar | [Calendar](https://lu.ma/swarms_calendar/usr-GPa2xaC3pL1Hunp) |

@ -132,16 +132,19 @@ markdown_extensions:
nav:
- Home:
- Overview: "index.md"
# - Swarm Ecosystem: "swarms/ecosystem.md"
- The Vision: "swarms/framework/vision.md"
# - Philosophy: "swarms/framework/philosophy.md"
# - Roadmap: "swarms/framework/roadmap.md"
- Swarms Python Framework:
- Install: "swarms/install/install.md"
- Swarms Framework Architecture: "swarms/concept/framework_architecture.md"
- Docker Setup: "swarms/install/docker_setup.md"
- Multi-Agent Repository Template: "swarms/install/multi_agent_template.md"
- Contributing: "swarms/contributing.md"
- Models:
- Overview: "swarms/models/index.md"
- How to Create A Custom Language Model: "swarms/models/custom_model.md"
@ -177,10 +180,17 @@ nav:
- Multi Agent Collaboration:
# - Overview: "swarms/structs/multi_agent_orchestration.md"
- Swarm Architectures: "swarms/concept/swarm_architectures.md"
- Multi-Agent Workflows: "swarms/structs/multi_agent_collaboration_examples.md"
- How to choose the right Swarm Architecture: "swarms/concept/how_to_choose_swarms.md"
# - SwarmNetwork: "swarms/structs/swarm_network.md"
# - MajorityVoting: "swarms/structs/majorityvoting.md"
- AgentRearrange: "swarms/structs/agent_rearrange.md"
- RoundRobin: "swarms/structs/round_robin_swarm.md"
- Mixture of Agents: "swarms/structs/moa.md"
@ -191,15 +201,23 @@ nav:
- AgentRegistry: "swarms/structs/agent_registry.md"
- SpreadSheetSwarm: "swarms/structs/spreadsheet_swarm.md"
# - Workflows: "swarms/structs/workflows.md"
# - Workflows:
- Workflows:
# - BaseWorkflow: "swarms/structs/base_workflow.md"
# - ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md"
- ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md"
# - SequentialWorkflow: "swarms/structs/sequential_workflow.md"
# - MultiProcessingWorkflow: "swarms/structs/multi_processing_workflow.md"
# - AsyncWorkflow: "swarms/structs/async_workflow.md"
- Structs:
# - BaseStructure: "swarms/structs/basestructure.md"
- Conversation: "swarms/structs/conversation.md"
- Task: "swarms/structs/task.md"
# - YamlModel: "swarms/structs/yaml_model.md"
- Contributing:
- Tests: "swarms/framework/test.md"
- Contributing: "contributing.md"
@ -231,7 +249,19 @@ nav:
- PUT: "swarms_platform/telemetry/index.md"
# - Tools API:
# - Overview: "swarms_platform/tools_api.md"
# - Add Tools: "swarms_platform/fetch_tools.md"
- Guides:
- Understanding Agent Evaluation Mechanisms: "guides/agent_evals.md"
- Agent Glossary: "swarms/glossary.md"
- Corporate:
- Hiring: "corporate/hiring.md"

@ -0,0 +1,159 @@
# Swarms Framework Architecture
The Swarms package is designed to orchestrate and manage **swarms of agents**, enabling collaboration between multiple Large Language Models (LLMs) or other agent types to solve complex tasks. The architecture is modular and scalable, facilitating seamless integration of various agents, models, prompts, and tools. Below is an overview of the architectural components, along with instructions on where to find the corresponding documentation.
```
swarms/
├── agents/
├── artifacts/
├── cli/
├── memory/
├── models/
├── prompts/
├── schemas/
├── structs/
├── telemetry/
├── tools/
├── utils/
└── __init__.py
```
### Role of Folders in the Swarms Framework
The **Swarms framework** is composed of several key folders, each serving a specific role in building, orchestrating, and managing swarms of agents. Below is an in-depth explanation of the role of each folder in the framework's architecture, focusing on how they contribute to the overall system for handling complex multi-agent workflows.
---
### **1. Agents Folder (`agents/`)**
- **Role:**
- The **agents** folder contains the core logic for individual agents within the Swarms framework. Agents are the key functional units responsible for carrying out specific tasks, whether it be text generation, web scraping, data analysis, or more specialized functions like marketing or accounting.
- **Customization:** Each agent can be specialized for different tasks by defining custom system prompts and behaviors.
- **Modular Agent System:** New agents can be easily added to this folder to expand the framework's capabilities.
- **Importance:** This folder allows users to create and manage multiple types of agents that can interact and collaborate to solve complex problems.
- **Examples:** Accounting agents, marketing agents, and programming agents.
---
### **2. Artifacts Folder (`artifacts/`)**
- **Role:**
- The **artifacts** folder is responsible for storing the results or outputs generated by agents and swarms. This could include reports, logs, or data that agents generate during task execution.
- **Persistent Storage:** It helps maintain a persistent record of agent interactions, making it easier to retrieve or review past actions and outputs.
- **Data Handling:** Users can configure this folder to store artifacts that are essential for later analysis or reporting.
- **Importance:** Acts as a storage mechanism for important task-related outputs, ensuring that no data is lost after tasks are completed.
---
### **3. CLI Folder (`cli/`)**
- **Role:**
- The **CLI** folder contains tools for interacting with the Swarms framework through the command-line interface. This allows users to easily manage and orchestrate swarms without needing a graphical interface.
- **Command-line Tools:** Commands in this folder enable users to initiate, control, and monitor swarms, making the system accessible and versatile.
- **Automation and Scriptability:** Enables advanced users to automate swarm interactions and deploy agents programmatically.
- **Importance:** Provides a flexible way to control the Swarms system for developers who prefer using the command line.
---
### **4. Memory Folder (`memory/`) (Deprecated)**
- **Role:**
- The **memory** folder handles the framework's memory management for agents. This allows agents to retain and recall past interactions or task contexts, enabling continuity in long-running processes or multi-step workflows.
- **Context Retention:** Agents that depend on historical context to make decisions or carry out tasks can store and access memory using this folder.
- **Long-Term and Short-Term Memory:** This could be implemented in various ways, such as short-term conversational memory or long-term knowledge storage.
- **Importance:** Crucial for agents that require memory to handle complex workflows, where decisions are based on prior outputs or interactions.
---
### **5. Models Folder (`models/`)**
- **Role:**
- The **models** folder houses pre-trained machine learning models that agents utilize to complete their tasks. These models could include LLMs (Large Language Models), custom-trained models, or fine-tuned models specific to the tasks being handled by the agents.
- **Plug-and-Play Architecture:** The framework allows users to easily add or switch models depending on the specific needs of their agents.
- **Custom Model Support:** Users can integrate custom models here for more specialized tasks.
- **Importance:** Provides the computational backbone for agent decision-making and task execution.
---
### **6. Prompts Folder (`prompts/`)**
- **Role:**
- The **prompts** folder contains reusable prompt templates that agents use to interact with their environment and complete tasks. These system prompts define the behavior and task orientation of the agents.
- **Template Reusability:** Users can create and store common prompt templates, making it easy to define agent behavior across different tasks without rewriting prompts from scratch.
- **Task-Specific Prompts:** For example, an accounting agent may have a prompt template that guides its interaction with financial data.
- **Importance:** Provides the logic and guidance agents need to generate outputs in a coherent and task-focused manner.
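For example, a packaged template can be reused directly when constructing an agent. A minimal sketch mirroring the financial-agent example from the README (the agent name here is illustrative):

```python
import os

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

# Reuse the packaged system prompt instead of writing one from scratch.
agent = Agent(
    agent_name="Financial-Analysis-Agent",  # illustrative name
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
)
```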
---
### **7. Schemas Folder (`schemas/`)**
- **Role:**
- The **schemas** folder defines the data structures and validation logic for inputs and outputs within the framework, using tools like **Pydantic** for data validation.
- **Standardization and Validation:** This ensures that all interactions between agents and swarms follow consistent data formats, which is critical for large-scale agent coordination and task management.
- **Error Prevention:** By validating data early, it prevents errors from propagating through the system, improving reliability.
- **Importance:** Ensures data consistency across the entire framework, making it easier to integrate and manage swarms of agents at scale.
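To illustrate, here is a minimal sketch of the kind of Pydantic model this folder might define; `AgentTaskSchema` and its fields are hypothetical, not actual framework classes:

```python
from pydantic import BaseModel, Field

# Hypothetical schema for a task handed to an agent.
class AgentTaskSchema(BaseModel):
    task_id: str = Field(..., description="Unique identifier for the task")
    agent_name: str
    prompt: str
    max_loops: int = 1

# Malformed input raises a ValidationError before reaching any agent.
task = AgentTaskSchema(
    task_id="task-001",
    agent_name="ExpenseAnalyzer",
    prompt="Categorize Q3 expenditures",
)
```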
---
### **8. Structs Folder (`structs/`)**
- **Role:**
- The **structs** folder is the core of the Swarms framework, housing the orchestration logic for managing and coordinating swarms of agents. This folder allows for dynamic task assignment, queue management, inter-agent communication, and result aggregation.
- **Swarm Management:** Agents are grouped into swarms to handle tasks that require multiple agents working in parallel or collaboratively.
- **Scalability:** The swarm structure is designed to be scalable, allowing thousands of agents to operate together on distributed tasks.
- **Task Queueing and Execution:** Supports task queueing, task prioritization, and load balancing between agents.
- **Importance:** This folder is critical for managing how agents interact and collaborate to solve complex, multi-step problems.
---
### **9. Telemetry Folder (`telemetry/`)**
- **Role:**
- The **telemetry** folder provides logging and monitoring tools to capture agent performance metrics, error handling, and real-time activity tracking. It helps users keep track of what each agent or swarm is doing, making it easier to debug, audit, and optimize operations.
- **Monitoring:** Tracks agent performance and system health.
- **Logs:** Maintains logs for troubleshooting and operational review.
- **Importance:** Provides visibility into the system, ensuring smooth operation and enabling fine-tuning of agent behaviors.
---
### **10. Tools Folder (`tools/`)**
- **Role:**
- The **tools** folder contains specialized utility functions or scripts that agents and swarms may require to complete certain tasks, such as web scraping, API interactions, data parsing, or other external resource handling.
- **Task-Specific Tools:** Agents can call these tools to perform operations outside of their own logic, enabling them to interact with external systems more efficiently.
- **Importance:** Expands the capabilities of agents, allowing them to complete more sophisticated tasks by relying on these external tools.
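As a rough illustration, a tool in this folder might look like the following; `fetch_page_text` is a hypothetical example, not an actual framework function:

```python
import requests

# Hypothetical task-specific tool: fetch a web page for an agent to process.
def fetch_page_text(url: str, timeout: int = 10) -> str:
    """Return the raw text of a web page, raising on HTTP errors."""
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    return response.text
```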
---
### **11. Utils Folder (`utils/`)**
- **Role:**
- The **utils** folder contains general-purpose utility functions that are reused throughout the framework. These may include functions for data formatting, validation, logging setup, and configuration management.
- **Shared Utilities:** Helps keep the codebase clean by providing reusable functions that multiple agents or parts of the framework can call.
- **Importance:** Provides common functions that help the Swarms framework operate efficiently and consistently.
---
### **Core Initialization File (`__init__.py`)**
- **Role:**
- The `__init__.py` file is the entry point of the Swarms package, ensuring that all necessary modules, agents, and tools are loaded when the Swarms framework is imported. It allows for the modular loading of different components, making it easier for users to work with only the parts of the framework they need.
- **Importance:** Acts as the bridge that connects all other components in the framework, enabling the entire package to work together seamlessly.
---
### How to Access Documentation
- **Official Documentation Site:**
- URL: [docs.swarms.world](https://docs.swarms.world)
- Here, users can find detailed guides, tutorials, and API references on how to use each of the folders mentioned above. The documentation covers setup, agent orchestration, and practical examples of how to leverage swarms for real-world tasks.
- **GitHub Repository:**
- URL: [Swarms GitHub](https://github.com/kyegomez/swarms)
- The repository contains code examples, detailed folder explanations, and further resources on how to get started with building and managing agent swarms.
By understanding the purpose and role of each folder in the Swarms framework, users can more effectively build, orchestrate, and manage agents to handle complex tasks and workflows at scale.
## Support
- **Post an Issue on GitHub**
- URL: [Submit issue](https://github.com/kyegomez/swarms/issues/new/choose)
- Post your issue, whether it's a bug report or a feature request
- **Community Support**
- URL: [Join the Discord](https://discord.gg/agora-999382051935506503)
- Ask the community for real-time support, or reach out to an admin


@ -0,0 +1,122 @@
---
### Federated Swarm
**Overview:**
A Federated Swarm architecture involves multiple independent swarms collaborating to complete a task. Each swarm operates autonomously but can share information and results with other swarms.
**Use-Cases:**
- Distributed learning systems where data is processed across multiple nodes.
- Scenarios requiring collaboration between different teams or departments.
```mermaid
graph TD
A[Central Coordinator]
subgraph Swarm1
B1[Agent 1.1] --> B2[Agent 1.2]
B2 --> B3[Agent 1.3]
end
subgraph Swarm2
C1[Agent 2.1] --> C2[Agent 2.2]
C2 --> C3[Agent 2.3]
end
subgraph Swarm3
D1[Agent 3.1] --> D2[Agent 3.2]
D2 --> D3[Agent 3.3]
end
B1 --> A
C1 --> A
D1 --> A
```
---
### Star Swarm
**Overview:**
A Star Swarm architecture features a central agent that coordinates the activities of several peripheral agents. The central agent assigns tasks to the peripheral agents and aggregates their results.
**Use-Cases:**
- Centralized decision-making processes.
- Scenarios requiring a central authority to coordinate multiple workers.
```mermaid
graph TD
A[Central Agent] --> B1[Peripheral Agent 1]
A --> B2[Peripheral Agent 2]
A --> B3[Peripheral Agent 3]
A --> B4[Peripheral Agent 4]
```
---
### Mesh Swarm
**Overview:**
A Mesh Swarm architecture allows for a fully connected network of agents where each agent can communicate with any other agent. This setup provides high flexibility and redundancy.
**Use-Cases:**
- Complex systems requiring high fault tolerance and redundancy.
- Scenarios involving dynamic and frequent communication between agents.
```mermaid
graph TD
A1[Agent 1] --> A2[Agent 2]
A1 --> A3[Agent 3]
A1 --> A4[Agent 4]
A2 --> A3
A2 --> A4
A3 --> A4
```
---
### Cascade Swarm
**Overview:**
A Cascade Swarm architecture involves a chain of agents where each agent triggers the next one in a cascade effect. This is useful for scenarios where tasks need to be processed in stages, and each stage initiates the next.
**Use-Cases:**
- Multi-stage processing tasks such as data transformation pipelines.
- Event-driven architectures where one event triggers subsequent actions.
```mermaid
graph TD
A[Trigger Agent] --> B[Agent 1]
B --> C[Agent 2]
C --> D[Agent 3]
D --> E[Agent 4]
```
---
### Hybrid Swarm
**Overview:**
A Hybrid Swarm architecture combines elements of various architectures to suit specific needs. It might integrate hierarchical and parallel components, or mix sequential and round robin patterns.
**Use-Cases:**
- Complex workflows requiring a mix of different processing strategies.
- Custom scenarios tailored to specific operational requirements.
```mermaid
graph TD
A[Root Agent] --> B1[Sub-Agent 1]
A --> B2[Sub-Agent 2]
B1 --> C1[Parallel Agent 1]
B1 --> C2[Parallel Agent 2]
B2 --> C3[Sequential Agent 1]
C3 --> C4[Sequential Agent 2]
C3 --> C5[Sequential Agent 3]
```
---
These swarm architectures provide different models for organizing and orchestrating large language models (LLMs) to perform various tasks efficiently. Depending on the specific requirements of your project, you can choose the appropriate architecture or even combine elements from multiple architectures to create a hybrid solution.
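For instance, the Star Swarm's fan-out can be sketched with `AgentRearrange` using the flow-string syntax from the README; the `agents` and `flow` parameters and the `run()` call here are assumptions based on that example, not a confirmed API:

```python
from swarms import Agent, AgentRearrange, Anthropic

# Illustrative agents; names must match those used in the flow string.
# Assumes ANTHROPIC_API_KEY is set in the environment.
center = Agent(agent_name="center", llm=Anthropic(), max_loops=1)
p1 = Agent(agent_name="p1", llm=Anthropic(), max_loops=1)
p2 = Agent(agent_name="p2", llm=Anthropic(), max_loops=1)
p3 = Agent(agent_name="p3", llm=Anthropic(), max_loops=1)

# The central agent fans the task out to three peripheral agents at once.
swarm = AgentRearrange(
    agents=[center, p1, p2, p3],
    flow="center -> p1, p2, p3",
)
swarm.run("Distribute the analysis across the peripheral agents.")
```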


@ -0,0 +1,137 @@
# Choosing the Right Swarm for Your Business Problem
The Swarms framework provides various swarm structures designed to fit specific business needs. Depending on the complexity and nature of your problem, different swarm configurations can be more effective in achieving optimal performance. This guide explains when to use each swarm type, including their strengths and potential drawbacks.
## Swarm Types Overview
- **MajorityVoting**: A swarm structure where agents vote on an outcome, and the majority decision is taken as the final result.
- **AgentRearrange**: Provides the foundation for both sequential and parallel swarms.
- **RoundRobin**: Agents take turns handling tasks in a cyclic manner.
- **Mixture of Agents**: A heterogeneous swarm where agents with different capabilities are combined.
- **GraphWorkflow**: Agents collaborate in a directed acyclic graph (DAG) format.
- **GroupChat**: Agents engage in a chat-like interaction to reach decisions.
- **AgentRegistry**: A centralized registry where agents are stored, retrieved, and invoked.
- **SpreadsheetSwarm**: A swarm designed to manage tasks at scale, tracking agent outputs in a structured format (e.g., CSV files).
---
## MajorityVoting Swarm
### Use-Case
MajorityVoting is ideal for scenarios where accuracy is paramount, and the decision must be determined from multiple perspectives. For instance, choosing the best marketing strategy where various marketing agents vote on the highest predicted performance.
### Advantages
- Ensures robustness in decision-making by leveraging multiple agents.
- Helps eliminate outliers or faulty agent decisions.
### Warnings
!!! warning
Majority voting can be slow if too many agents are involved. Ensure that your swarm size is manageable for real-time decision-making.
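A minimal sketch of the pattern; the top-level `MajorityVoting` import and its constructor are assumptions based on the docs navigation, so consult the MajorityVoting reference for the actual API:

```python
import os

from swarms import Agent, MajorityVoting, OpenAIChat

model = OpenAIChat(api_key=os.getenv("OPENAI_API_KEY"), model_name="gpt-4o-mini")

# Three marketing agents vote on the same question.
agents = [
    Agent(agent_name=f"Strategist-{i}", llm=model, max_loops=1)
    for i in range(3)
]

# Assumption: MajorityVoting takes the voting agents at construction.
swarm = MajorityVoting(agents=agents)
result = swarm.run("Which campaign should receive the Q4 budget?")
```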
---
## AgentRearrange (Sequential and Parallel)
### Sequential Swarm Use-Case
For linear workflows where each task depends on the outcome of the previous task, such as processing legal documents step by step through a series of checks and validations.
### Parallel Swarm Use-Case
For tasks that can be executed concurrently, such as batch processing customer data in marketing campaigns. Parallel swarms can significantly reduce processing time by dividing tasks across multiple agents.
### Notes
!!! note
Sequential swarms are slower but ensure strict task dependencies are respected. Parallel swarms are faster but require careful management of task interdependencies.
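Both modes can be expressed with the flow-string syntax from the README. A minimal sketch, assuming `AgentRearrange` accepts `agents` and `flow` keyword arguments and exposes `run()`, as the README examples suggest:

```python
import os

from swarms import Agent, AgentRearrange, OpenAIChat

model = OpenAIChat(api_key=os.getenv("OPENAI_API_KEY"), model_name="gpt-4o-mini")

intake = Agent(agent_name="intake", llm=model, max_loops=1)
check = Agent(agent_name="check", llm=model, max_loops=1)
validate = Agent(agent_name="validate", llm=model, max_loops=1)

# Sequential: each step depends on the previous one.
# Parallel would instead be "intake -> check, validate".
swarm = AgentRearrange(
    agents=[intake, check, validate],
    flow="intake -> check -> validate",
)
swarm.run("Process this legal document step by step.")
```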
---
## RoundRobin Swarm
### Use-Case
For balanced task distribution where agents need to handle tasks evenly. An example would be assigning customer support tickets to agents in a cyclic manner, ensuring no single agent is overloaded.
### Advantages
- Fair and even distribution of tasks.
- Simple and effective for balanced workloads.
### Warnings
!!! warning
Round-robin may not be the best choice when some agents are more competent than others, as it can assign tasks equally regardless of agent performance.
---
## Mixture of Agents
### Use-Case
Ideal for complex problems that require diverse skills. For example, a financial forecasting problem where some agents specialize in stock data, while others handle economic factors.
### Notes
!!! note
A mixture of agents is highly flexible and can adapt to various problem domains. However, be mindful of coordination overhead.
---
## GraphWorkflow Swarm
### Use-Case
This swarm structure is suited for tasks that can be broken down into a series of dependencies but are not strictly linear, such as an AI-driven software development pipeline where one agent handles front-end development while another handles back-end concurrently.
### Advantages
- Provides flexibility for managing dependencies.
- Agents can work on different parts of the problem simultaneously.
### Warnings
!!! warning
GraphWorkflow requires a clear definition of task dependencies, or it can lead to execution issues and delays.
---
## GroupChat Swarm
### Use-Case
For real-time collaborative decision-making. For instance, agents could participate in group chat for negotiating contracts, each contributing their expertise and adjusting responses based on the collective discussion.
### Advantages
- Facilitates highly interactive problem-solving.
- Ideal for dynamic and unstructured problems.
### Warnings
!!! warning
High communication overhead between agents may slow down decision-making in large swarms.
---
## AgentRegistry Swarm
### Use-Case
For dynamically managing agents based on the problem domain. An AgentRegistry is useful when new agents can be added or removed as needed, such as adding new machine learning models for an evolving recommendation engine.
### Notes
!!! note
AgentRegistry is a flexible solution but introduces additional complexity when agents need to be discovered and registered on the fly.
---
## SpreadsheetSwarm
### Use-Case
When dealing with massive-scale data or agent outputs that need to be stored and managed in a tabular format. SpreadsheetSwarm is ideal for businesses handling thousands of agent outputs, such as large-scale marketing analytics or financial audits.
### Advantages
- Provides structure and order for managing massive amounts of agent outputs.
- Outputs are easily saved and tracked in CSV files.
### Warnings
!!! warning
Ensure the correct configuration of agents in SpreadsheetSwarm to avoid data mismatches and inconsistencies when scaling up to thousands of agents.
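A minimal sketch, reusing the configuration from the real-estate example earlier in this changeset; the `agents` parameter and the top-level import are assumptions, so check the SpreadSheetSwarm docs for the actual API:

```python
import os

from swarms import Agent, OpenAIChat, SpreadSheetSwarm

model = OpenAIChat(api_key=os.getenv("OPENAI_API_KEY"), model_name="gpt-4o-mini")

# One analyst per region; outputs are tracked in a single CSV.
analysts = [
    Agent(agent_name=f"Region-Analyst-{i}", llm=model, max_loops=1)
    for i in range(3)
]

swarm = SpreadSheetSwarm(
    agents=analysts,  # assumption: agents are passed at construction
    autosave_on=True,
    save_file_path="marketing_analytics.csv",
    run_all_agents=False,
    max_loops=2,
)
swarm.run("Summarize campaign performance for your region.")
```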
---
## Final Thoughts
The choice of swarm depends on:
1. **Nature of the task**: Whether it's sequential or parallel.
2. **Problem complexity**: Simple problems might benefit from RoundRobin, while complex ones may need GraphWorkflow or Mixture of Agents.
3. **Scale of execution**: For large-scale tasks, swarms like SpreadsheetSwarm or MajorityVoting provide scalability with structured outputs.
When integrating agents in a business workflow, it's crucial to balance task complexity, agent capabilities, and scalability to ensure the optimal swarm architecture.

@ -0,0 +1,137 @@
# Choosing the Right Swarm for Your Business Problem
`AgentRearrange` provides various swarm structures designed to fit specific business needs. Depending on the complexity and nature of your problem, different swarm configurations can be more effective in achieving optimal performance. This guide provides a detailed explanation of when to use each swarm type, including their strengths and potential drawbacks.
## Swarm Types Overview
- **MajorityVoting**: A swarm structure where agents vote on an outcome, and the majority decision is taken as the final result.
- **AgentRearrange**: Provides the foundation for both sequential and parallel swarms.
- **RoundRobin**: Agents take turns handling tasks in a cyclic manner.
- **Mixture of Agents**: A heterogeneous swarm where agents with different capabilities are combined.
- **GraphWorkflow**: Agents collaborate in a directed acyclic graph (DAG) format.
- **GroupChat**: Agents engage in a chat-like interaction to reach decisions.
- **AgentRegistry**: A centralized registry where agents are stored, retrieved, and invoked.
- **SpreadsheetSwarm**: A swarm designed to manage tasks at scale, tracking agent outputs in a structured format (e.g., CSV files).
---
## MajorityVoting Swarm
### Use-Case
MajorityVoting is ideal for scenarios where accuracy is paramount, and the decision must be determined from multiple perspectives. For instance, choosing the best marketing strategy where various marketing agents vote on the highest predicted performance.
### Advantages
- Ensures robustness in decision-making by leveraging multiple agents.
- Helps eliminate outliers or faulty agent decisions.
### Warnings
!!! warning
Majority voting can be slow if too many agents are involved. Ensure that your swarm size is manageable for real-time decision-making.
---
## AgentRearrange (Sequential and Parallel)
### Sequential Swarm Use-Case
For linear workflows where each task depends on the outcome of the previous task, such as processing legal documents step by step through a series of checks and validations.
### Parallel Swarm Use-Case
For tasks that can be executed concurrently, such as batch processing customer data in marketing campaigns. Parallel swarms can significantly reduce processing time by dividing tasks across multiple agents.
### Notes
!!! note
Sequential swarms are slower but ensure strict task dependencies are respected. Parallel swarms are faster but require careful management of task interdependencies.
---
## RoundRobin Swarm
### Use-Case
For balanced task distribution where agents need to handle tasks evenly. An example would be assigning customer support tickets to agents in a cyclic manner, ensuring no single agent is overloaded.
### Advantages
- Fair and even distribution of tasks.
- Simple and effective for balanced workloads.
### Warnings
!!! warning
Round-robin may not be the best choice when some agents are more competent than others, as it can assign tasks equally regardless of agent performance.
---
## Mixture of Agents
### Use-Case
Ideal for complex problems that require diverse skills. For example, a financial forecasting problem where some agents specialize in stock data, while others handle economic factors.
### Notes
!!! note
A mixture of agents is highly flexible and can adapt to various problem domains. However, be mindful of coordination overhead.
---
## GraphWorkflow Swarm
### Use-Case
This swarm structure is suited for tasks that can be broken down into a series of dependencies but are not strictly linear, such as an AI-driven software development pipeline where one agent handles front-end development while another handles back-end concurrently.
### Advantages
- Provides flexibility for managing dependencies.
- Agents can work on different parts of the problem simultaneously.
### Warnings
!!! warning
GraphWorkflow requires clear definition of task dependencies, or it can lead to execution issues and delays.
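A sketch of the DAG pattern for the pipeline described above; the `Node`/`Edge` builder API shown here is an assumption, so consult the GraphWorkflow docs for the exact interface.

```python
import os

from swarms import Agent, OpenAIChat
from swarms.structs.graph_workflow import (  # assumed import path
    Edge,
    GraphWorkflow,
    Node,
    NodeType,
)

model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

frontend = Agent(agent_name="Frontend-Dev", llm=model, max_loops=1)
backend = Agent(agent_name="Backend-Dev", llm=model, max_loops=1)
reviewer = Agent(agent_name="Code-Reviewer", llm=model, max_loops=1)

graph = GraphWorkflow()
graph.add_node(Node(id="frontend", type=NodeType.AGENT, agent=frontend))
graph.add_node(Node(id="backend", type=NodeType.AGENT, agent=backend))
graph.add_node(Node(id="review", type=NodeType.AGENT, agent=reviewer))

# Front-end and back-end proceed concurrently; review depends on both
graph.add_edge(Edge(source="frontend", target="review"))
graph.add_edge(Edge(source="backend", target="review"))
graph.set_entry_points(["frontend", "backend"])
graph.set_end_points(["review"])

print(graph.run())
```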
---
## GroupChat Swarm
### Use-Case
For real-time collaborative decision-making. For instance, agents could participate in a group chat to negotiate contracts, each contributing their expertise and adjusting responses based on the collective discussion.
### Advantages
- Facilitates highly interactive problem-solving.
- Ideal for dynamic and unstructured problems.
### Warnings
!!! warning
High communication overhead between agents may slow down decision-making in large swarms.
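A sketch of the contract-negotiation scenario, assuming a `GroupChat` structure that takes a list of agents and a round limit and exposes `run`; the `max_rounds` parameter name is an assumption.

```python
import os

from swarms import Agent, GroupChat, OpenAIChat

model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

buyer_counsel = Agent(agent_name="Buyer-Counsel", llm=model, max_loops=1)
seller_counsel = Agent(agent_name="Seller-Counsel", llm=model, max_loops=1)

# Agents exchange messages until the round limit is reached
chat = GroupChat(
    agents=[buyer_counsel, seller_counsel],
    max_rounds=4,  # assumption: caps the back-and-forth
)

print(chat.run("Negotiate the indemnification clause of this contract."))
```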
---
## AgentRegistry Swarm
### Use-Case
For dynamically managing agents based on the problem domain. An AgentRegistry is useful when new agents can be added or removed as needed, such as adding new machine learning models for an evolving recommendation engine.
### Notes
!!! note
AgentRegistry is a flexible solution but introduces additional complexity when agents need to be discovered and registered on the fly.
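A sketch of dynamic registration, assuming an `AgentRegistry` with `add` and `get` methods keyed by agent name; the import path and method names are assumptions.

```python
import os

from swarms import Agent, OpenAIChat
from swarms.structs.agent_registry import AgentRegistry  # assumed import path

model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

registry = AgentRegistry()

# Register agents as the recommendation engine evolves
registry.add(Agent(agent_name="Collab-Filter-Agent", llm=model, max_loops=1))
registry.add(Agent(agent_name="Content-Based-Agent", llm=model, max_loops=1))

# Look an agent up by name when a request needs it
agent = registry.get("Collab-Filter-Agent")
print(agent.run("Recommend three products for user 42."))
```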
---
## SpreadsheetSwarm
### Use-Case
When dealing with massive-scale data or agent outputs that need to be stored and managed in a tabular format. SpreadsheetSwarm is ideal for businesses handling thousands of agent outputs, such as large-scale marketing analytics or financial audits.
### Advantages
- Provides structure and order for managing massive amounts of agent outputs.
- Outputs are easily saved and tracked in CSV files.
### Warnings
!!! warning
Ensure the correct configuration of agents in SpreadsheetSwarm to avoid data mismatches and inconsistencies when scaling up to thousands of agents.
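A minimal sketch using the `SpreadSheetSwarm` parameters documented later in this guide (`autosave_on`, `save_file_path`, `run_all_agents`, `max_loops`); the agent names and task are illustrative.

```python
import os

from swarms import Agent, OpenAIChat, SpreadSheetSwarm

model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

audit_agents = [
    Agent(agent_name=f"Audit-Agent-{i}", llm=model, max_loops=1)
    for i in range(3)
]

# Every agent's output is tracked and written to a single CSV file
swarm = SpreadSheetSwarm(
    agents=audit_agents,
    autosave_on=True,
    save_file_path="financial_audit_results.csv",
    run_all_agents=True,
    max_loops=1,
)

swarm.run("Summarize anomalies in this quarter's expense ledger.")
```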
---
## Final Thoughts
The choice of swarm depends on:
1. **Nature of the task**: Whether it's sequential or parallel.
2. **Problem complexity**: Simple problems might benefit from RoundRobin, while complex ones may need GraphWorkflow or Mixture of Agents.
3. **Scale of execution**: For large-scale tasks, swarm structures like SpreadsheetSwarm or MajorityVoting provide scalability with structured outputs.
When integrating agents into a business workflow, it's crucial to balance task complexity, agent capabilities, and scalability to choose the optimal swarm architecture.

@ -43,7 +43,8 @@ graph TD
### Parallel Swarm
**Overview:**
In a Parallel Swarm architecture, multiple agents operate independently and simultaneously on different tasks. Each agent works on its own task without dependencies on the others. [Learn more here in the docs:](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)
**Use-Cases:**
- Tasks that can be processed independently, such as parallel data analysis.
@ -51,7 +52,7 @@ In a Parallel Swarm architecture, multiple agents operate independently and simu
```mermaid
graph LR
A[Task] --> B1[Sub-Agent 1]
A --> B2[Sub-Agent 2]
A --> B3[Sub-Agent 3]
A --> B4[Sub-Agent 4]
@ -62,7 +63,7 @@ graph LR
### Sequential Swarm
**Overview:**
A Sequential Swarm architecture processes tasks in a linear sequence. Each agent completes its task before passing the result to the next agent in the chain. This architecture ensures orderly processing and is useful when tasks have dependencies. [Learn more here in the docs:](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)
**Use-Cases:**
- Workflows where each step depends on the previous one, such as assembly lines or sequential data processing.
@ -84,6 +85,7 @@ graph TD
In a Round Robin Swarm architecture, tasks are distributed cyclically among a set of agents. Each agent takes turns handling tasks in a rotating order, ensuring even distribution of workload.
**Use-Cases:**
- Load balancing in distributed systems.
- Scenarios requiring fair distribution of tasks to avoid overloading any single agent.
@ -99,3 +101,58 @@ graph TD
B3 --> A
B4 --> A
```
### SpreadSheet Swarm
**Overview:**
The SpreadSheet Swarm makes it easy to manage thousands of agents in one place: a CSV file. You can initialize any number of agents and use the `max_loops` parameter to control how many times the swarm runs them on the task. Learn more in the [docs here](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)
**Use-Cases:**
- Multi-threaded execution: run agents concurrently on multiple threads
- Save agent outputs to a CSV file
- One place to analyze agent outputs
```mermaid
graph TD
A[Initialize SpreadSheetSwarm] --> B[Initialize Agents]
B --> C[Load Task Queue]
C --> D[Run Task]
subgraph Agents
D --> E1[Agent 1]
D --> E2[Agent 2]
D --> E3[Agent 3]
end
E1 --> F1[Process Task]
E2 --> F2[Process Task]
E3 --> F3[Process Task]
F1 --> G1[Track Output]
F2 --> G2[Track Output]
F3 --> G3[Track Output]
subgraph Save Outputs
G1 --> H[Save to CSV]
G2 --> H[Save to CSV]
G3 --> H[Save to CSV]
end
H --> I{Autosave Enabled?}
I --> |Yes| J[Export Metadata to JSON]
I --> |No| K[End Swarm Run]
%% Style adjustments
classDef blackBox fill:#000,stroke:#f00,color:#fff;
class A,B,C,D,E1,E2,E3,F1,F2,F3,G1,G2,G3,H,I,J,K blackBox;
```

@ -0,0 +1,238 @@
# Contribution Guidelines
---
## Table of Contents
- [Project Overview](#project-overview)
- [Getting Started](#getting-started)
- [Installation](#installation)
- [Project Structure](#project-structure)
- [How to Contribute](#how-to-contribute)
- [Reporting Issues](#reporting-issues)
- [Submitting Pull Requests](#submitting-pull-requests)
- [Coding Standards](#coding-standards)
- [Type Annotations](#type-annotations)
- [Docstrings and Documentation](#docstrings-and-documentation)
- [Testing](#testing)
- [Code Style](#code-style)
- [Areas Needing Contributions](#areas-needing-contributions)
- [Writing Tests](#writing-tests)
- [Improving Documentation](#improving-documentation)
- [Creating Training Scripts](#creating-training-scripts)
- [Community and Support](#community-and-support)
- [License](#license)
---
## Project Overview
**swarms** is a library focused on making it simple to orchestrate agents to automate real-world activities. The goal is to automate the world economy with these swarms of agents.
We need your help to:
- **Write Tests**: Ensure the reliability and correctness of the codebase.
- **Improve Documentation**: Maintain clear and comprehensive documentation.
- **Add New Orchestration Methods**: Contribute new multi-agent orchestration methods.
- **Remove Defunct Code**: Clean out dead or broken code.
Your contributions will help us push the boundaries of AI and make this library a valuable resource for the community.
---
## Getting Started
### Installation
You can install swarms using `pip`:
```bash
pip3 install swarms
```
Alternatively, you can clone the repository:
```bash
git clone https://github.com/kyegomez/swarms
```
### Project Structure
- **`swarms/`**: Contains all the source code for the library.
- **`examples/`**: Includes example scripts and notebooks demonstrating how to use the library.
- **`tests/`**: (To be created) Will contain unit tests for the library.
- **`docs/`**: (To be maintained) Contains documentation files.
---
## How to Contribute
### Reporting Issues
If you find any bugs, inconsistencies, or have suggestions for enhancements, please open an issue on GitHub:
1. **Search Existing Issues**: Before opening a new issue, check if it has already been reported.
2. **Open a New Issue**: If it hasn't been reported, create a new issue and provide detailed information.
- **Title**: A concise summary of the issue.
- **Description**: Detailed description, steps to reproduce, expected behavior, and any relevant logs or screenshots.
3. **Label Appropriately**: Use labels to categorize the issue (e.g., bug, enhancement, documentation).
### Submitting Pull Requests
We welcome pull requests (PRs) for bug fixes, improvements, and new features. Please follow these guidelines:
1. **Fork the Repository**: Create a personal fork of the repository on GitHub.
2. **Clone Your Fork**: Clone your forked repository to your local machine.
```bash
git clone https://github.com/<your-username>/swarms.git
```
3. **Create a New Branch**: Use a descriptive branch name.
```bash
git checkout -b feature/your-feature-name
```
4. **Make Your Changes**: Implement your code, ensuring it adheres to the coding standards.
5. **Add Tests**: Write tests to cover your changes.
6. **Commit Your Changes**: Write clear and concise commit messages.
```bash
git commit -am "Add feature X"
```
7. **Push to Your Fork**:
```bash
git push origin feature/your-feature-name
```
8. **Create a Pull Request**:
- Go to the original repository on GitHub.
- Click on "New Pull Request".
- Select your branch and create the PR.
- Provide a clear description of your changes and reference any related issues.
9. **Respond to Feedback**: Be prepared to make changes based on code reviews.
**Note**: It's recommended to create small and focused PRs for easier review and faster integration.
---
## Coding Standards
To maintain code quality and consistency, please adhere to the following standards.
### Type Annotations
- **Mandatory**: All functions and methods must have type annotations.
- **Example**:
```python
def add_numbers(a: int, b: int) -> int:
    return a + b
```
- **Benefits**:
- Improves code readability.
- Helps with static type checking tools.
### Docstrings and Documentation
- **Docstrings**: Every public class, function, and method must have a docstring following the [Google Python Style Guide](http://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) or [NumPy Docstring Standard](https://numpydoc.readthedocs.io/en/latest/format.html).
- **Content**:
- **Description**: Briefly describe what the function or class does.
- **Args**: List and describe each parameter.
- **Returns**: Describe the return value(s).
- **Raises**: List any exceptions that are raised.
- **Example**:
```python
def calculate_mean(values: List[float]) -> float:
    """
    Calculates the mean of a list of numbers.

    Args:
        values (List[float]): A list of numerical values.

    Returns:
        float: The mean of the input values.

    Raises:
        ValueError: If the input list is empty.
    """
    if not values:
        raise ValueError("The input list is empty.")
    return sum(values) / len(values)
```
- **Documentation**: Update or create documentation pages if your changes affect the public API.
### Testing
- **Required**: All new features and bug fixes must include appropriate unit tests.
- **Framework**: Use `unittest`, `pytest`, or a similar testing framework.
- **Test Location**: Place tests in the `tests/` directory, mirroring the structure of `swarms/`.
- **Test Coverage**: Aim for high test coverage to ensure code reliability.
- **Running Tests**: Provide instructions for running tests.
```bash
pytest tests/
```
### Code Style
- **PEP 8 Compliance**: Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guidelines.
- **Linting Tools**: Use `flake8`, `black`, or `pylint` to check code style.
- **Consistency**: Maintain consistency with the existing codebase.
---
## Areas Needing Contributions
We have several areas where contributions are particularly welcome.
### Writing Tests
- **Goal**: Increase test coverage to ensure the library's robustness.
- **Tasks**:
- Write unit tests for existing code in `swarms/`.
- Identify edge cases and potential failure points.
- Ensure tests are repeatable and independent.
### Improving Documentation
- **Goal**: Maintain clear and comprehensive documentation for users and developers.
- **Tasks**:
- Update docstrings to reflect any changes.
- Add examples and tutorials in the `examples/` directory.
- Improve or expand the content in the `docs/` directory.
### Creating Multi-Agent Orchestration Methods
- **Goal**: Provide new multi-agent orchestration methods
---
## Community and Support
- **Communication**: Engage with the community by participating in discussions on issues and pull requests.
- **Respect**: Maintain a respectful and inclusive environment.
- **Feedback**: Be open to receiving and providing constructive feedback.
---
## License
By contributing to swarms, you agree that your contributions will be licensed under the [MIT License](LICENSE).
---
Thank you for contributing to swarms! Your efforts help make this project better for everyone.
If you have any questions or need assistance, please feel free to open an issue or reach out to the maintainers.

@ -16,7 +16,441 @@ $ pip install swarms-memory
The next step is to implement the abstract methods defined in the `BaseVectorDatabase` class. These methods provide the core functionality for interacting with your vector database, such as connecting, querying, and performing CRUD operations.
```python
from swarms_memory import BaseVectorDatabase


class MyCustomVectorDatabase(BaseVectorDatabase):
    def __init__(self, *args, **kwargs):
        # Custom initialization logic
        pass

    def connect(self):
        # Implementation for connecting to the vector database
        pass

    def close(self):
        # Implementation for closing the vector database connection
        pass

    def query(self, query: str):
        # Implementation for executing a query on the vector database
        pass

    def fetch_all(self):
        # Implementation for fetching all rows from the result set
        pass

    def fetch_one(self):
        # Implementation for fetching a single row from the result set
        pass

    def add(self, doc: str):
        # Implementation for adding a new record to the vector database
        pass

    def get(self, query: str):
        # Implementation for retrieving a record from the vector database
        pass

    def update(self, doc):
        # Implementation for updating a record in the vector database
        pass

    def delete(self, message):
        # Implementation for deleting a record from the vector database
        pass
```
In this example, we define placeholders for each of the abstract methods within the `MyCustomVectorDatabase` class. These placeholders will be replaced with the actual implementation logic specific to your chosen vector database solution.
### Step 3: Choose and Integrate Your Vector Database Solution
With the foundational structure in place, it's time to choose a specific vector database solution and integrate it into your custom implementation. In this guide, we'll explore several popular vector database solutions, including PostgreSQL, Pinecone, Chroma, FAISS, and more, providing examples and guidance on how to integrate them seamlessly.
### PostgreSQL Integration
PostgreSQL is a powerful open-source relational database management system that supports vector data types and operations, making it a viable choice for building custom vector memory databases.
```python
import psycopg2


class PostgreSQLVectorDatabase(MyCustomVectorDatabase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # PostgreSQL connection details
        self.conn = psycopg2.connect(
            host="localhost",
            database="vector_db",
            user="postgres",
            password="your_password",
        )
        self.cur = self.conn.cursor()

    def connect(self):
        # PostgreSQL connection logic
        pass

    def close(self):
        # Close PostgreSQL connection
        self.cur.close()
        self.conn.close()

    def query(self, query: str):
        # Execute PostgreSQL query
        self.cur.execute(query)

    def fetch_all(self):
        # Fetch all rows from PostgreSQL result set
        return self.cur.fetchall()

    # Implement other abstract methods
```
In this example, we define a `PostgreSQLVectorDatabase` class that inherits from `MyCustomVectorDatabase`. Within the `__init__` method, we establish a connection to a PostgreSQL database using the `psycopg2` library. We then implement the `connect()`, `close()`, `query()`, and `fetch_all()` methods specific to PostgreSQL.
### Pinecone Integration
Pinecone is a managed vector database service that provides efficient storage, retrieval, and manipulation of high-dimensional vector data.
```python
import pinecone


class PineconeVectorDatabase(MyCustomVectorDatabase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pinecone initialization
        pinecone.init(api_key="your_api_key", environment="your_environment")
        self.index = pinecone.Index("your_index_name")

    def connect(self):
        # Pinecone connection logic
        pass

    def close(self):
        # Close Pinecone connection
        pass

    def query(self, query: str):
        # Execute Pinecone query
        results = self.index.query(query)
        return results

    def add(self, doc: str):
        # Add document to Pinecone index
        self.index.upsert([("id", doc)])

    # Implement other abstract methods
```
In this example, we define a `PineconeVectorDatabase` class that inherits from `MyCustomVectorDatabase`. Within the `__init__` method, we initialize the Pinecone client and create an index. We then implement the `query()` and `add()` methods specific to the Pinecone API.
### Chroma Integration
Chroma is an open-source vector database library that provides efficient storage, retrieval, and manipulation of vector data using various backends, including DuckDB and others.
```python
import logging
import os
import uuid
from typing import Optional
import chromadb
from dotenv import load_dotenv
from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message
from swarms_memory import BaseVectorDatabase
# Load environment variables
load_dotenv()
# Results storage using local ChromaDB
class ChromaDB(BaseVectorDatabase):
    """
    ChromaDB database.

    Args:
        metric (str): The similarity metric to use.
        output_dir (str): The name of the collection to store the results in.
        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
        n_results (int, optional): The number of results to retrieve. Defaults to 3.
        docs_folder (str, optional): A folder of documents to ingest on initialization.
        verbose (bool, optional): Whether to enable verbose logging.

    Methods:
        add: Add a document to the collection.
        query: Query documents from the collection.

    Examples:
        >>> chromadb = ChromaDB(
        ...     metric="cosine",
        ...     output_dir="results",
        ... )
        >>> doc_id = chromadb.add("document text")
        >>> docs = chromadb.query("search text")
    """

    def __init__(
        self,
        metric: str = "cosine",
        output_dir: str = "swarms",
        limit_tokens: Optional[int] = 1000,
        n_results: int = 3,
        docs_folder: Optional[str] = None,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self.metric = metric
        self.output_dir = output_dir
        self.limit_tokens = limit_tokens
        self.n_results = n_results
        self.docs_folder = docs_folder
        self.verbose = verbose

        # Enable ChromaDB logging only in verbose mode
        if verbose:
            logging.getLogger("chromadb").setLevel(logging.INFO)

        # Create a persistent Chroma client
        chroma_persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=chroma_persist_dir,
            ),
            *args,
            **kwargs,
        )

        # Create or fetch the Chroma collection
        self.collection = chroma_client.get_or_create_collection(
            name=output_dir,
            metadata={"hnsw:space": metric},
            *args,
            **kwargs,
        )
        display_markdown_message(
            "ChromaDB collection created:"
            f" {self.collection.name} with metric: {self.metric} and"
            f" output directory: {self.output_dir}"
        )

        # Ingest any documents provided at startup
        if docs_folder:
            display_markdown_message(
                f"Traversing directory: {docs_folder}"
            )
            self.traverse_directory()

    def add(
        self,
        document: str,
        *args,
        **kwargs,
    ):
        """
        Add a document to the ChromaDB collection.

        Args:
            document (str): The document to be added.

        Returns:
            str: The ID of the added document.
        """
        try:
            doc_id = str(uuid.uuid4())
            self.collection.add(
                ids=[doc_id],
                documents=[document],
                *args,
                **kwargs,
            )
            print("-----------------")
            print("Document added successfully")
            print("-----------------")
            return doc_id
        except Exception as e:
            raise Exception(f"Failed to add document: {str(e)}")

    def query(
        self,
        query_text: str,
        *args,
        **kwargs,
    ):
        """
        Query documents from the ChromaDB collection.

        Args:
            query_text (str): The query string.

        Returns:
            dict: The retrieved documents.
        """
        try:
            docs = self.collection.query(
                query_texts=[query_text],
                n_results=self.n_results,
                *args,
                **kwargs,
            )["documents"]
            return docs[0]
        except Exception as e:
            raise Exception(f"Failed to query documents: {str(e)}")

    def traverse_directory(self):
        """
        Traverse every file in the docs folder and its subdirectories
        and add each file's contents to the database.

        Returns:
            bool: Whether any file was added to the database.
        """
        added_to_db = False
        for root, dirs, files in os.walk(self.docs_folder):
            for file in files:
                # Use the file's actual location so subdirectories work
                file = os.path.join(root, file)
                _, ext = os.path.splitext(file)
                data = data_to_text(file)
                added_to_db = self.add(str(data))
                print(f"{file} added to Database")
        return added_to_db
```
In this example, we define a `ChromaDB` class that inherits from `BaseVectorDatabase`. Within the `__init__` method, we create a persistent Chroma client and get or create a collection. We then implement the `add()` and `query()` methods specific to the Chroma API.
### FAISS Integration
FAISS (Facebook AI Similarity Search) is a library for efficient similarity search and clustering of dense vectors, developed by Meta AI.
```python
import faiss
import numpy as np


class FAISSVectorDatabase(MyCustomVectorDatabase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # FAISS initialization
        self.index = faiss.IndexFlatL2(64)  # Assuming 64-dimensional vectors
        self.index_path = "faiss_index.index"

    def connect(self):
        # Load a previously saved FAISS index from disk
        self.index = faiss.read_index(self.index_path)

    def close(self):
        # Persist the FAISS index to disk
        faiss.write_index(self.index, self.index_path)

    def query(self, query: str):
        # embed_text is a hypothetical helper that turns text into a
        # 64-dimensional vector (e.g., a sentence-transformer model)
        query_vector = np.array([embed_text(query)], dtype="float32")
        distances, indices = self.index.search(query_vector, 10)
        return [
            (self.index.reconstruct(int(i)), d)
            for i, d in zip(indices[0], distances[0])
        ]

    def add(self, doc: str):
        # Convert the document to a vector and add it to the index
        doc_vector = np.array([embed_text(doc)], dtype="float32")
        self.index.add(doc_vector)

    # Implement other abstract methods
```
Note that `embed_text` in the FAISS example is a hypothetical helper standing in for whatever embedding model you use. Now, how do you integrate a vector database with an agent? Here's how:
## Integrate Memory with `Agent`
```python
from typing import List, Dict, Any

@ -154,7 +154,7 @@ You will also need a Pinecone account and API key. Follow the instructions on th
3. Import the necessary modules in your Python code:
```python
from swarms.memory.vector_stores.pinecone import PineconeVector
from swarms_memory import PineconeVector
```
Now you're ready to use the `PineconeVector` class to work with Pinecone for vector storage.

@ -46,7 +46,7 @@ To use the Weaviate API Client, you need to initialize an instance of the `Weavi
Here's an example of how to initialize a WeaviateDB:
```python
from swarms.memory import WeaviateDB
from swarms_memory import WeaviateDB
weaviate_client = WeaviateDB(
http_host="YOUR_HTTP_HOST",

@ -1,6 +1,6 @@
# `Agent` Documentation
Swarm Agent is a powerful autonomous agent framework designed to connect Language Models (LLMs) with various tools and long-term memory. This class provides the ability to ingest and process various types of documents such as PDFs, text files, Markdown files, JSON files, and more. The Agent structure offers a wide range of features to enhance the capabilities of LLMs and facilitate efficient task execution.
1. **Conversational Loop**: It establishes a conversational loop with a language model. This means it allows you to interact with the model in a back-and-forth manner, taking turns in the conversation.
@ -128,24 +128,49 @@ Swarm Agent is a powerful autonomous agent framework designed to connect Languag
First run the following:
```bash
pip3 install -U swarms
```
Then you can get started with the following:
```python
import os

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
print(out)
```
This example initializes an instance of the `Agent` class with an OpenAI language model, a finance-focused system prompt, and a single loop. The `run()` method is then called with a question about ROTH IRAs, and the agent's response is printed.
@ -156,14 +181,131 @@ The Swarm Agent provides numerous advanced features and customization options. H
### Tool Integration
To integrate tools with the Swarm Agent, pass a list of callable functions with type hints and docstrings to the `tools` parameter when initializing the `Agent` instance. The agent will automatically convert these functions into an OpenAI function calling schema and make them available for use during task execution.
## Requirements for a tool
- A plain Python function
- With type hints
- With a docstring
```python
from swarms import Agent, OpenAIChat
from swarms_memory import ChromaDB
import subprocess
import os

# Making an instance of the ChromaDB class
memory = ChromaDB(
    metric="cosine",
    n_results=3,
    output_dir="results",
    docs_folder="docs",
)

# Model
model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)


# Tools in swarms are simple Python functions with type hints and docstrings
def terminal(
    code: str,
):
    """
    Run code in the terminal.

    Args:
        code (str): The code to run in the terminal.

    Returns:
        str: The output of the code.
    """
    out = subprocess.run(
        code, shell=True, capture_output=True, text=True
    ).stdout
    return str(out)


def browser(query: str):
    """
    Search the query in the browser with the `browser` tool.

    Args:
        query (str): The query to search in the browser.

    Returns:
        str: The search results.
    """
    import webbrowser

    url = f"https://www.google.com/search?q={query}"
    webbrowser.open(url)
    return f"Searching for {query} in the browser."


def create_file(file_path: str, content: str):
    """
    Create a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file creation operation.
    """
    with open(file_path, "w") as file:
        file.write(content)
    return f"File {file_path} created successfully."


def file_editor(file_path: str, mode: str, content: str):
    """
    Edit a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        mode (str): The mode to open the file in.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file editing operation.
    """
    with open(file_path, mode) as file:
        file.write(content)
    return f"File {file_path} edited successfully."


# Agent
agent = Agent(
    agent_name="Devin",
    system_prompt=(
        "Autonomous agent that can interact with humans and other"
        " agents. Be Helpful and Kind. Use the tools provided to"
        " assist the user. Return all code in markdown format."
    ),
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[terminal, browser, file_editor, create_file],
    streaming=True,
    long_term_memory=memory,
)

# Run the agent
out = agent(
    "Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
)
print(out)
```
### Long-term Memory Management
@ -171,14 +313,50 @@ agent = Agent(llm=llm, max_loops=3, tools=[tool_function_1, tool_function_2])
The Swarm Agent supports integration with various vector databases for long-term memory management. You can pass an instance of a `BaseVectorDatabase` implementation to the `long_term_memory` parameter when initializing the `Agent`.
```python
import os

from swarms_memory import ChromaDB
from swarms import Agent, Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Initialize the ChromaDB client
chromadb = ChromaDB(
    metric="cosine",
    output_dir="finance_agent_rag",
    # docs_folder="artifacts", # Folder of your documents
)

# Model
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    agent_description="Financial analysis agent backed by long-term memory",
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=200000,
    long_term_memory=chromadb,
)

agent.run(
    "What are the components of a startup's stock incentive equity plan?"
)
```
### Document Ingestion
@ -317,8 +495,6 @@ agent.tokens_checks()
# Print the dashboard of the agent
agent.print_dashboard()
# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()

@ -1,77 +1,296 @@
# ConcurrentWorkflow Documentation
## Overview
The `ConcurrentWorkflow` class is designed to facilitate the concurrent execution of multiple agents, each tasked with solving a specific query or problem. This class is particularly useful in scenarios where multiple agents need to work in parallel, allowing for efficient resource utilization and faster completion of tasks. The workflow manages the execution, collects metadata, and optionally saves the results in a structured format.
### Key Features
- **Concurrent Execution**: Runs multiple agents simultaneously using Python's `asyncio` and `ThreadPoolExecutor`.
- **Metadata Collection**: Gathers detailed metadata about each agent's execution, including start and end times, duration, and output.
- **Customizable Output**: Allows the user to save metadata to a file or return it as a string or dictionary.
- **Error Handling**: Catches and logs errors during agent execution, ensuring the workflow can continue.
## Class Definitions
### AgentOutputSchema
The `AgentOutputSchema` class is a data model that captures the output and metadata for each agent's execution. It inherits from `pydantic.BaseModel` and provides structured fields to store essential information.
| Attribute | Type | Description |
|---------------|----------------|-----------------------------------------------------------|
| `run_id` | `Optional[str]`| Unique ID for the run, automatically generated using `uuid`. |
| `agent_name` | `Optional[str]`| Name of the agent that executed the task. |
| `task` | `Optional[str]`| The task or query given to the agent. |
| `output` | `Optional[str]`| The output generated by the agent. |
| `start_time` | `Optional[datetime]`| The time when the agent started the task. |
| `end_time` | `Optional[datetime]`| The time when the agent completed the task. |
| `duration` | `Optional[float]` | The total time taken to complete the task, in seconds. |
### MetadataSchema
The `MetadataSchema` class is another data model that aggregates the outputs from all agents involved in the workflow. It also inherits from `pydantic.BaseModel` and includes fields for additional workflow-level metadata.
| Attribute | Type | Description |
|----------------|------------------------|-----------------------------------------------------------|
| `swarm_id` | `Optional[str]` | Unique ID for the workflow run, generated using `uuid`. |
| `task` | `Optional[str]` | The task or query given to all agents. |
| `description` | `Optional[str]` | A description of the workflow, typically indicating concurrent execution. |
| `agents` | `Optional[List[AgentOutputSchema]]` | A list of agent outputs and metadata. |
| `timestamp` | `Optional[datetime]` | The timestamp when the workflow was executed. |
## ConcurrentWorkflow
The `ConcurrentWorkflow` class is the core class that manages the concurrent execution of agents. It inherits from `BaseSwarm` and includes several key attributes and methods to facilitate this process.
### Attributes
| Attribute | Type | Description |
|------------------------|-------------------------|-----------------------------------------------------------|
| `name` | `str` | The name of the workflow. Defaults to `"ConcurrentWorkflow"`. |
| `description` | `str` | A brief description of the workflow. |
| `agents` | `List[Agent]` | A list of agents to be executed concurrently. |
| `metadata_output_path` | `str` | Path to save the metadata output. Defaults to `"agent_metadata.json"`. |
| `auto_save` | `bool` | Flag indicating whether to automatically save the metadata. |
| `output_schema` | `BaseModel` | The output schema for the metadata, defaults to `MetadataSchema`. |
| `max_loops` | `int` | Maximum number of loops for the workflow, defaults to `1`. |
| `return_str_on` | `bool` | Flag to return output as string. Defaults to `False`. |
| `agent_responses` | `List[str]` | List of agent responses as strings. |
## Methods
### ConcurrentWorkflow.\_\_init\_\_
Initializes the `ConcurrentWorkflow` class with the provided parameters.
#### Parameters
| Parameter | Type | Default Value | Description |
|-----------------------|----------------|----------------------------------------|-----------------------------------------------------------|
| `name` | `str` | `"ConcurrentWorkflow"` | The name of the workflow. |
| `description` | `str` | `"Execution of multiple agents concurrently"` | A brief description of the workflow. |
| `agents` | `List[Agent]` | `[]` | A list of agents to be executed concurrently. |
| `metadata_output_path`| `str` | `"agent_metadata.json"` | Path to save the metadata output. |
| `auto_save` | `bool` | `False` | Flag indicating whether to automatically save the metadata. |
| `output_schema` | `BaseModel` | `MetadataSchema` | The output schema for the metadata. |
| `max_loops` | `int` | `1` | Maximum number of loops for the workflow. |
| `return_str_on` | `bool` | `False` | Flag to return output as string. |
| `agent_responses` | `List[str]` | `[]` | List of agent responses as strings. |
#### Raises
- `ValueError`: If the list of agents is empty or if the description is empty.
### ConcurrentWorkflow._run_agent
Runs a single agent with the provided task and tracks its output and metadata.
#### Parameters
| Parameter | Type | Description |
|-------------|-------------------------|-----------------------------------------------------------|
| `agent` | `Agent` | The agent instance to run. |
| `task` | `str` | The task or query to give to the agent. |
| `executor` | `ThreadPoolExecutor` | The thread pool executor to use for running the agent task. |
#### Returns
- `AgentOutputSchema`: The metadata and output from the agent's execution.
#### Detailed Explanation
This method handles the execution of a single agent by offloading the task to a thread using `ThreadPoolExecutor`. It also tracks the time taken by the agent to complete the task and logs relevant information. If an exception occurs during execution, it captures the error and includes it in the output.
### ConcurrentWorkflow.transform_metadata_schema_to_str

Transforms the metadata schema into a string format.

#### Parameters

| Parameter   | Type                | Description                                               |
|-------------|---------------------|-----------------------------------------------------------|
| `schema`    | `MetadataSchema`    | The metadata schema to transform.                         |

#### Returns

- `str`: The metadata schema as a formatted string.

#### Detailed Explanation

This method converts the metadata stored in `MetadataSchema` into a human-readable string format, particularly focusing on the agent names and their respective outputs. This is useful for quickly reviewing the results of the concurrent workflow in a more accessible format.

### ConcurrentWorkflow._execute_agents_concurrently

Executes multiple agents concurrently with the same task.

#### Parameters

| Parameter   | Type         | Description                                               |
|-------------|--------------|-----------------------------------------------------------|
| `task`      | `str`        | The task or query to give to all agents.                  |

#### Returns

- `MetadataSchema`: The aggregated metadata and outputs from all agents.

#### Detailed Explanation

This method is responsible for managing the concurrent execution of all agents. It uses `asyncio.gather` to run multiple agents simultaneously and collects their outputs into a `MetadataSchema` object. This aggregated metadata can then be saved or returned depending on the workflow configuration.
### ConcurrentWorkflow.run
Runs the workflow for the provided task, executes agents concurrently, and saves metadata.
#### Parameters
| Parameter | Type | Description |
|-------------|--------------|-----------------------------------------------------------|
| `task` | `str` | The task or query to give to all agents. |
#### Returns
- `Dict[str, Any]`: The final metadata as a dictionary.
#### Detailed Explanation
This is the main method that a user will call to execute the workflow. It manages the entire process from starting the agents to collecting and optionally saving the metadata. The method also provides flexibility in how the results are returned—either as a JSON dictionary or as a formatted string.
## Usage Examples
### Example 1: Basic Usage
```python
import os

from swarms import Agent, ConcurrentWorkflow, OpenAIChat

# Initialize the model shared by all agents
model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

# Define custom system prompts for each social media platform
TWITTER_AGENT_SYS_PROMPT = """
You are a Twitter marketing expert specializing in real estate. Your task is to create engaging, concise tweets to promote properties, analyze trends to maximize engagement, and use appropriate hashtags and timing to reach potential buyers.
"""

INSTAGRAM_AGENT_SYS_PROMPT = """
You are an Instagram marketing expert focusing on real estate. Your task is to create visually appealing posts with engaging captions and hashtags to showcase properties, targeting specific demographics interested in real estate.
"""

FACEBOOK_AGENT_SYS_PROMPT = """
You are a Facebook marketing expert for real estate. Your task is to craft posts optimized for engagement and reach on Facebook, including using images, links, and targeted messaging to attract potential property buyers.
"""

LINKEDIN_AGENT_SYS_PROMPT = """
You are a LinkedIn marketing expert for the real estate industry. Your task is to create professional and informative posts, highlighting property features, market trends, and investment opportunities, tailored to professionals and investors.
"""

EMAIL_AGENT_SYS_PROMPT = """
You are an Email marketing expert specializing in real estate. Your task is to write compelling email campaigns to promote properties, focusing on personalization, subject lines, and effective call-to-action strategies to drive conversions.
"""

# Initialize your agents for different social media platforms
agents = [
    Agent(
        agent_name="Twitter-RealEstate-Agent",
        system_prompt=TWITTER_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        saved_state_path="twitter_realestate_agent.json",
        user_name="swarm_corp",
        retry_attempts=1,
    ),
    Agent(
        agent_name="Instagram-RealEstate-Agent",
        system_prompt=INSTAGRAM_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        saved_state_path="instagram_realestate_agent.json",
        user_name="swarm_corp",
        retry_attempts=1,
    ),
    Agent(
        agent_name="Facebook-RealEstate-Agent",
        system_prompt=FACEBOOK_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        saved_state_path="facebook_realestate_agent.json",
        user_name="swarm_corp",
        retry_attempts=1,
    ),
    Agent(
        agent_name="LinkedIn-RealEstate-Agent",
        system_prompt=LINKEDIN_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        saved_state_path="linkedin_realestate_agent.json",
        user_name="swarm_corp",
        retry_attempts=1,
    ),
    Agent(
        agent_name="Email-RealEstate-Agent",
        system_prompt=EMAIL_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        saved_state_path="email_realestate_agent.json",
        user_name="swarm_corp",
        retry_attempts=1,
    ),
]

# Initialize workflow
workflow = ConcurrentWorkflow(
    name="Real Estate Marketing Swarm",
    agents=agents,
    metadata_output_path="metadata.json",
    description="Concurrent swarm of content generators for real estate!",
    auto_save=True,
)

# Run workflow
task = "Analyze the financial impact of a new product launch."
metadata = workflow.run(task)
print(metadata)
```
### Example 2: Custom Output Handling
```python
# Run workflow with string output
workflow = ConcurrentWorkflow(agents=agents, return_str_on=True)
metadata_str = workflow.run(task)
print(metadata_str)
```
### Example 3: Error Handling and Debugging
```python
try:
    metadata = workflow.run(task)
except ValueError as e:
    print(f"An error occurred: {e}")
```
## Tips and Best Practices

- **Agent Initialization**: Ensure that all agents are correctly initialized with their required configurations before passing them to `ConcurrentWorkflow`.
- **Metadata Management**: Use the `auto_save` flag to automatically save metadata if you plan to run multiple workflows in succession.
- **Concurrency Limits**: Adjust the number of agents based on your system's capabilities to avoid overloading resources.
- **Error Handling**: Implement try-except blocks when running workflows to catch and handle exceptions gracefully.

## References and Resources
- [Python's `asyncio` Documentation](https://docs.python.org/3/library/asyncio.html)
- [Pydantic Documentation](https://pydantic-docs.helpmanual.io/)
- [ThreadPoolExecutor in Python](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor)
- [Loguru for Logging in Python](https://loguru.readthedocs.io/en/stable/)

@ -152,7 +152,7 @@ The Agent class provides built-in support for long-term memory, allowing agents
```python
from swarms_memory import BaseVectorDatabase
from swarms import Agent

@ -65,7 +65,7 @@ This method is intended for internal use and is typically called by the `run` me
```python
# Create a task and result
task = Task()
result = task.run()
# Autosave the result
workflow = MultiThreadedWorkflow()

@ -29,7 +29,7 @@ The `SpreadSheetSwarm` class contains several attributes that define its behavio
| `lock` | `threading.Lock` | A lock used for thread synchronization to prevent race conditions. |
| `metadata` | `SwarmRunMetadata` | Metadata for the swarm run, including start time, end time, tasks completed, and outputs. |
| `run_all_agents` | `bool` | Flag indicating whether to run all agents or just one. |
| `max_loops` | `int` | The number of times to repeat the task. |
| `workspace_dir` | `str` | The directory where the workspace is located, retrieved from environment variables. |
### Parameters
@ -40,7 +40,7 @@ The `SpreadSheetSwarm` class contains several attributes that define its behavio
- **`autosave_on`** (`bool`, optional): A flag to indicate if autosave is enabled. Default is `True`.
- **`save_file_path`** (`str`, optional): The file path where swarm data will be saved. Default is `"spreedsheet_swarm.csv"`.
- **`run_all_agents`** (`bool`, optional): Flag to determine if all agents should run. Default is `True`.
- **`max_loops`** (`int`, optional): The number of times to repeat the task. Default is `1`.
- **`workspace_dir`** (`str`, optional): The directory where the workspace is located. Default is retrieved from environment variable `WORKSPACE_DIR`.
### Constructor (`__init__`)
@ -227,7 +227,7 @@ swarm = SpreadSheetSwarm(
autosave_on=True,
save_file_path="financial_spreed_sheet_swarm_demo.csv",
run_all_agents=False,
max_loops=1,
)
# Run the swarm
@ -293,7 +293,7 @@ swarm = SpreadSheetSwarm(
autosave_on=True,
save_file_path="qr_code_generation_results.csv",
run_all_agents=False,
max_loops=1,
)
# Run the swarm
@ -388,7 +388,7 @@ swarm = SpreadSheetSwarm(
autosave_on=True,
save_file_path="social_media_marketing_spreadsheet.csv",
run_all_agents=False,
max_loops=2,
)
# Run the swarm

@ -24,7 +24,7 @@ The `StepInput` class is designed to accept any input value, providing flexibili
### Usage Example 1:
```python
from swarms import StepInput
input_params = {"file_to_refactor": "models.py", "refactor_method": "code"}
step_input = StepInput(__root__=input_params)
@ -34,7 +34,7 @@ In this example, we import the `StepInput` class from the `swarms.structs` libra
### Usage Example 2:
```python
from swarms.structs import StepInput
from swarms import StepInput
input_params = {"input_path": "data.csv", "output_path": "result.csv"}
step_input = StepInput(__root__=input_params)
@ -44,7 +44,7 @@ In this example, we again create an instance of `StepInput` by passing a diction
### Usage Example 3:
```python
from swarms.structs import StepInput
from swarms import StepInput
file_path = "config.json"
with open(file_path) as f:

@ -49,7 +49,7 @@ Executes the task by calling the agent or model with the specified arguments and
>>> from swarms.models import OpenAIChat
>>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False)
>>> task = Task(description="What's the weather in Miami?", agent=agent)
>>> task.execute()
>>> task.run()
>>> task.result
```

@ -3,18 +3,24 @@ from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from dotenv import load_dotenv
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
openai_api_key=api_key,
model_name="gpt-4o-mini",
temperature=0.1,
max_tokens=2000,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent_sas_chicken_eej",
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
@ -27,6 +33,7 @@ agent = Agent(
retry_attempts=1,
context_length=200000,
return_step_meta=False,
# output_type="json",
)

@ -38,7 +38,9 @@ class AutogenAgentWrapper(Agent):
human_input_mode="NEVER",
)
def run(self, task: str, *args: Any, **kwargs: Any) -> Optional[str]:
def run(
self, task: str, *args: Any, **kwargs: Any
) -> Optional[str]:
"""
Run the AutogenAgentWrapper.
@ -62,7 +64,10 @@ class AutogenAgentWrapper(Agent):
llm_config = {
"config_list": [
{"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}
{
"model": "gpt-4",
"api_key": os.environ.get("OPENAI_API_KEY"),
}
]
}

@ -59,7 +59,10 @@ class CrewAIAgentWrapper(Agent):
"""
try:
crew_task = Task(
description=task, agent=self.crewai_agent, *args, **kwargs
description=task,
agent=self.crewai_agent,
*args,
**kwargs,
)
crew = Crew(
agents=[self.crewai_agent],

@ -1,57 +0,0 @@
import json
import os
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
import asyncio
from swarms.telemetry.async_log_telemetry import send_telemetry
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent-General-11",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
# tools=[#Add your functions here# ],
# stopping_token="Stop!",
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp",
# # docs="",
retry_attempts=3,
# context_length=1000,
# tool_schema = dict
context_length=200000,
tool_system_prompt=None,
)
# # Convert the agent object to a dictionary
data = agent.to_dict()
data = json.dumps(data)
# Async
async def send_data():
response_status, response_data = await send_telemetry(data)
print(response_status, response_data)
# Run the async function
asyncio.run(send_data())

@ -72,9 +72,6 @@ agent.tokens_checks()
# Print the dashboard of the agent
agent.print_dashboard()
# Print the history and memory of the agent
agent.print_history_and_memory()
# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()
@ -85,3 +82,7 @@ agent.check_end_session_agentops()
# Dump the model to a JSON file
agent.model_dump_json()
print(agent.to_toml())
# Print all of the output metadata of the agent
print(agent.agent_output.model_dump())
print(agent.agent_output.model_dump_json())

@ -0,0 +1,40 @@
import os
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key,
model_name="o1-preview",
temperature=0.1,
max_tokens=100,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent_sas_chicken_eej",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=2,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=False,
# output_type="json",
)
out = agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)

@ -0,0 +1,40 @@
import os
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.agent import Agent
from swarms import OpenAIChat
# Example usage:
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent_sas_chicken_eej",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=2,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
)
out = agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)

@ -8,7 +8,9 @@ import os
class Schema(BaseModel):
name: str = Field(..., title="Name of the person")
agent: int = Field(..., title="Age of the person")
is_student: bool = Field(..., title="Whether the person is a student")
is_student: bool = Field(
..., title="Whether the person is a student"
)
courses: list[str] = Field(
..., title="List of courses the person is taking"
)
@ -23,7 +25,9 @@ tool_schema = Schema(
)
# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"
task = (
"Generate a person's information based on the following schema:"
)
# Initialize the agent
agent = Agent(

@ -14,7 +14,8 @@ def search_api(query: str, max_results: int = 10):
agent = Agent(
agent_name="Youtube Transcript Generator",
agent_description=(
"Generate a transcript for a youtube video on what swarms" " are!"
"Generate a transcript for a youtube video on what swarms"
" are!"
),
llm=Anthropic(),
max_loops="auto",

@ -72,7 +72,9 @@ model = OpenAIFunctionCaller(
)
def parse_json_for_agents_then_create_agents(function_call: dict) -> str:
def parse_json_for_agents_then_create_agents(
function_call: dict,
) -> str:
agents = []
for agent in json["multiple_agents"]:
agents.append(

@ -117,7 +117,9 @@ class APIRequestAgent:
Dict[str, Any]: The parsed response content.
"""
try:
logger.info(f"Response status code: {response.status_code}")
logger.info(
f"Response status code: {response.status_code}"
)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
@ -184,7 +186,9 @@ class APIRequestAgent:
logging.error(f"RequestException: {e}")
raise
def execute_api_request(self, task: APITaskSchema) -> Dict[str, Any]:
def execute_api_request(
self, task: APITaskSchema
) -> Dict[str, Any]:
"""
Executes a single step: sends the request and processes the response.

@ -71,5 +71,7 @@ model = OpenAIFunctionCaller(
# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run("We need to craft a diamond pickaxe to mine the obsidian.")
out = model.run(
"We need to craft a diamond pickaxe to mine the obsidian."
)
print(out)

@ -4,7 +4,9 @@ from swarms import tool
# Create the wrapper to wrap the function
@tool(
name="Geo Coordinates Locator",
description=("Locates geo coordinates with a city and or zip code"),
description=(
"Locates geo coordinates with a city and or zip code"
),
return_string=False,
return_dict=False,
)

@ -192,7 +192,9 @@ system_prompt = """
"""
agent = PromptGeneratorAgent(system_prompt=system_prompt, max_tokens=4000)
agent = PromptGeneratorAgent(
system_prompt=system_prompt, max_tokens=4000
)
response = agent.run(
"Create a prompt for an agent to analyze complicated cashflow statements and generate a summary report."

@ -21,7 +21,9 @@ json_schema = {
}
# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"
task = (
"Generate a person's information based on the following schema:"
)
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -28,7 +28,9 @@ class APIExampleRequestSchema(BaseModel):
headers: dict = Field(
..., description="The headers for the example request"
)
body: dict = Field(..., description="The body of the example request")
body: dict = Field(
..., description="The body of the example request"
)
response: dict = Field(
...,
description="The expected response of the example request",

@ -3,7 +3,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
model = AutoModelForCausalLM.from_pretrained(
"databricks/dolly-v2-12b"
)
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
# Define a JSON schema for person's information
@ -18,7 +20,9 @@ json_schema = {
}
# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"
task = (
"Generate a person's information based on the following schema:"
)
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -17,7 +17,9 @@ tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
class Schema(BaseModel):
name: str = Field(..., title="Name of the person")
agent: int = Field(..., title="Age of the person")
is_student: bool = Field(..., title="Whether the person is a student")
is_student: bool = Field(
..., title="Whether the person is a student"
)
courses: list[str] = Field(
..., title="List of courses the person is taking"
)
@ -27,7 +29,9 @@ class Schema(BaseModel):
tool_schema = base_model_to_json(Schema)
# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"
task = (
"Generate a person's information based on the following schema:"
)
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -19,7 +19,9 @@ chat = OpenAIChat(
class Schema(BaseModel):
name: str = Field(..., title="Name of the person")
agent: int = Field(..., title="Age of the person")
is_student: bool = Field(..., title="Whether the person is a student")
is_student: bool = Field(
..., title="Whether the person is a student"
)
courses: list[str] = Field(
..., title="List of courses the person is taking"
)
@ -29,7 +31,9 @@ class Schema(BaseModel):
tool_schema = base_model_to_json(Schema)
# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"
task = (
"Generate a person's information based on the following schema:"
)
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -0,0 +1,73 @@
2024-08-23T16:57:09.831419-0400 Autosaving agent state.
2024-08-23T16:57:09.832168-0400 Saving Agent Financial-Analysis-Agent_sas_chicken_eej state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T16:57:09.833811-0400 Saved agent state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T16:57:09.835655-0400 Function metrics: {
"execution_time": 7.066652059555054,
"memory_usage": -130.59375,
"cpu_usage": -18.6,
"io_operations": 1562,
"function_calls": 1
}
swarms [
s_chicken_eej state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T16:58:50.884436-0400 Saved agent state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T16:58:50.887356-0400 Function metrics: {
"execution_time": 12.482966899871826,
"memory_usage": -323.140625,
"cpu_usage": -11.099999999999998,
"io_operations": 8723,
"function_calls": 1
}
s_chicken_eej state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T16:58:50.884436-0400 Saved agent state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T16:58:50.887356-0400 Function metrics: {
"execution_time": 12.482966899871826,
"memory_usage": -323.140625,
"cpu_usage": -11.099999999999998,
"io_operations": 8723,
"function_calls": 1
}
en_eej_state.json
2024-08-23T17:00:19.967511-0400 Saved agent state to: Financial-Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T17:00:19.969208-0400 Function metrics: {
"execution_time": 8.775875091552734,
"memory_usage": -70.046875,
"cpu_usage": -16.2,
"io_operations": 7530,
"function_calls": 1
}
Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T17:00:45.474628-0400 Function metrics: {
"execution_time": 8.27669095993042,
"memory_usage": -197.34375,
"cpu_usage": -12.5,
"io_operations": 7955,
"function_calls": 1
}
Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T17:01:53.768837-0400 Function metrics: {
"execution_time": 11.86063528060913,
"memory_usage": -48.453125,
"cpu_usage": -16.5,
"io_operations": 5022,
"function_calls": 1
}
#############
Analysis-Agent_sas_chicken_eej_state.json
2024-08-23T17:03:39.670708-0400 Function metrics: {
"execution_time": 8.982940912246704,
"memory_usage": -321.171875,
"cpu_usage": -12.5,
"io_operations": 3118,
"function_calls": 1
}

@ -8,7 +8,6 @@
"source": [
"# pip3 install multion\n",
"# pip3 install swarms\n",
"import multion\n",
"from multion.client import MultiOn\n",
"from swarms import Agent\n",
"import os\n",

@ -44,7 +44,9 @@ def clean_model_code(model_code_str: str) -> str:
Returns:
str: The cleaned-up model code.
"""
cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
cleaned_code = model_code_str.replace("\\n", "\n").replace(
"\\'", "'"
)
return cleaned_code.strip()
@ -65,7 +67,9 @@ def generate_novel_model() -> Dict[str, str]:
}
def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
def generate_and_save_model(
i: int, dataset: List[Dict[str, str]]
) -> None:
"""
Generate, clean, save, and add the model data to a dataset.
@ -85,7 +89,9 @@ def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
dataset.append(model_data)
def save_to_jsonl(dataset: List[Dict[str, str]], file_path: str) -> None:
def save_to_jsonl(
dataset: List[Dict[str, str]], file_path: str
) -> None:
"""
Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.
@ -110,7 +116,9 @@ def upload_to_huggingface(
dataset_name (str): The name of the dataset on Hugging Face.
huggingface_token (str): Your Hugging Face token for authentication.
"""
dataset = load_dataset("json", data_files=file_path, split="train")
dataset = load_dataset(
"json", data_files=file_path, split="train"
)
dataset.push_to_hub(dataset_name, token=huggingface_token)
logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")
@ -144,7 +152,9 @@ def main(
thread.join()
save_to_jsonl(dataset, jsonl_file_path)
upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)
upload_to_huggingface(
jsonl_file_path, dataset_name, huggingface_token
)
# Example usage

@ -70,7 +70,9 @@ def clean_model_code(model_code_str: str) -> str:
Returns:
str: The cleaned-up model code.
"""
cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
cleaned_code = model_code_str.replace("\\n", "\n").replace(
"\\'", "'"
)
return cleaned_code.strip()
@ -105,7 +107,9 @@ def generate_novel_model() -> Dict[str, str]:
}
def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
def generate_and_save_model(
i: int, dataset: List[Dict[str, str]]
) -> None:
"""
Generate, clean, save, and add the model data to a dataset.
@ -125,7 +129,9 @@ def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
dataset.append(model_data)
def save_to_jsonl(dataset: List[Dict[str, str]], file_path: str) -> None:
def save_to_jsonl(
dataset: List[Dict[str, str]], file_path: str
) -> None:
"""
Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.
@ -150,7 +156,9 @@ def upload_to_huggingface(
dataset_name (str): The name of the dataset on Hugging Face.
huggingface_token (str): Your Hugging Face token for authentication.
"""
dataset = load_dataset("json", data_files=file_path, split="train")
dataset = load_dataset(
"json", data_files=file_path, split="train"
)
dataset.push_to_hub(dataset_name, token=huggingface_token)
logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")
@ -184,7 +192,9 @@ def main(
thread.join()
save_to_jsonl(dataset, jsonl_file_path)
upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)
upload_to_huggingface(
jsonl_file_path, dataset_name, huggingface_token
)
# Example usage

@ -51,7 +51,9 @@ model = OpenAIFunctionCaller(
def clean_model_code(model_code_str: str):
# Remove extra escape characters and newlines
cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
cleaned_code = model_code_str.replace("\\n", "\n").replace(
"\\'", "'"
)
# Remove unnecessary leading and trailing whitespaces
cleaned_code = cleaned_code.strip()
@ -81,7 +83,9 @@ def generate_and_execute_model(
# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run(task)
name, theory, code, example_code = parse_function_call_output(out)
logger.info(f"Algorithm {name}: Mathamatical formulation {theory}")
logger.info(
f"Algorithm {name}: Mathamatical formulation {theory}"
)
# Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code
code = clean_model_code(code)
@ -105,16 +109,22 @@ def generate_and_execute_model(
logger.error(f"Error in code execution: {test}")
if "error" in test_example:
logger.error(f"Error in code execution example: {test_example}")
logger.error(
f"Error in code execution example: {test_example}"
)
else:
logger.info(f"Successfully executed code for novel model {name}")
logger.info(
f"Successfully executed code for novel model {name}"
)
# Create and start a new thread for each model
threads = []
for i in range(10):
thread = threading.Thread(target=generate_and_execute_model, args=(i,))
thread = threading.Thread(
target=generate_and_execute_model, args=(i,)
)
thread.start()
threads.append(thread)

@ -31,7 +31,9 @@ def clean_model_code(model_code_str: str) -> str:
Returns:
str: The cleaned-up model code.
"""
cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
cleaned_code = model_code_str.replace("\\n", "\n").replace(
"\\'", "'"
)
return cleaned_code.strip()

@ -119,4 +119,6 @@ agent = Agent(
)
agent.run("optimize for the Minimal tax holdings at death, end of life")
agent.run(
"optimize for the Minimal tax holdings at death, end of life"
)

@ -45,7 +45,9 @@ def fetch_financial_news(
data = response.json()
if "articles" not in data or len(data["articles"]) == 0:
raise ValueError("No articles found or invalid API response.")
raise ValueError(
"No articles found or invalid API response."
)
articles = data["articles"]
formatted_articles = []

@ -24,5 +24,7 @@ agent = Agent(
# Run
response = agent("What's the best state to incorporate a non profit in?")
response = agent(
"What's the best state to incorporate a non profit in?"
)
print(response)

@ -5,8 +5,12 @@ from typing import Any, Dict, List
from plaid import Client
from plaid.api import plaid_api
from plaid.model.error import PlaidError
from plaid.model.transactions_get_request import TransactionsGetRequest
from plaid.model.transactions_get_response import TransactionsGetResponse
from plaid.model.transactions_get_request import (
TransactionsGetRequest,
)
from plaid.model.transactions_get_response import (
TransactionsGetResponse,
)
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
@ -55,8 +59,8 @@ def fetch_transactions(
)
# Fetch transactions from the Plaid API
response: TransactionsGetResponse = plaid_client.transactions_get(
request
response: TransactionsGetResponse = (
plaid_client.transactions_get(request)
)
# Return the transactions list

@ -0,0 +1,212 @@
"""
Input: a person knows that someone has a will, a trust, or other legal documents.
Context: gather info from the user: the name of the person and the name of the person's lawyer.
Another agent will fill out the form.
Output: create a PDF file with all of the information under the same headings.
"""
from typing import Optional
from pydantic import BaseModel, Field
from swarms.models.openai_function_caller import OpenAIFunctionCaller
PROBATE_SYS_PROMPT = """
### System Prompt: Autonomously Fill Out Probate Form DE-122/GC-322
---
**Task Description:**
You are an intelligent LLM agent tasked with filling out the Probate Form DE-122/GC-322 based on the user's profile information. The form includes fields such as attorney details, court information, case number, and more. You must extract relevant information from the user's profile, fill out the form fields accurately, and mark the appropriate checkboxes.
---
### Step-by-Step Instructions:
1. **Initialize Profile Data:**
- Begin by loading the user's profile data. This profile may include details like the attorney's name, state bar number, address, court details, case information, and any specific instructions provided by the user.
2. **Form Field Mapping:**
- Map the user's profile data to the corresponding form fields. Use the Pydantic schema provided to ensure that each form field is filled with the appropriate data.
- The schema fields include: `attorney_name`, `state_bar_number`, `court_name`, `case_number`, `decedent_name`, and so on.
3. **Field Filling:**
- For each field in the form:
- **Attorney Information:**
- Populate `attorney_name` with the user's attorney name.
- Populate `state_bar_number` with the user's state bar number.
- Fill in `attorney_address`, `court_name`, `case_number`, etc., with the corresponding data from the profile.
- **Court Information:**
- Fill out `court_name`, `court_address`, `hearing_date`, `hearing_time`, `hearing_dept`, and `hearing_room` based on the provided court details.
- **Decedent Information:**
- Populate the `decedent_name` field with the name of the decedent or trust involved in the probate.
4. **Checkbox Selection:**
- Evaluate the circumstances based on the user's profile:
- Select `as_individual`, `as_person_cited`, and other relevant checkboxes depending on the user's role and the type of service.
- For example, if the user is serving as an individual, mark `as_individual: True`.
- Review legal codes and guidelines associated with each checkbox (e.g., `Code Civ. Proc., § 416.10`) and ensure that the correct checkboxes are selected based on the profile data and case type.
5. **Service Details:**
- If the user profile includes details about how the citation was served (e.g., `served_by_personal_delivery`), fill out the relevant checkboxes and fields (`service_person_name`, `service_date`, `service_time`).
- Include additional information under `service_other_details` if specific instructions are provided.
6. **Validation and Final Review:**
- After filling out all fields and checkboxes, review the completed form to ensure accuracy and completeness.
- Cross-check the filled data with the user's profile to confirm that all relevant information has been included.
7. **Finalize and Prepare for Submission:**
- Mark the `acknowledgement_checkbox` if the user has acknowledged the information.
- Insert any final signatures, dates, or additional required fields (`declarant_signature`, `declarant_date`).
- Save the completed form for review by the user or automatically submit it based on the user's instructions.
8. **Provide Output:**
- Return the filled form as a structured data object or PDF, ready for printing or digital submission.
- Optionally, provide a summary of the filled form fields and selected checkboxes for the user's review.
---
**Example Input:**
- User profile includes attorney name: "John Doe", state bar number: "123456", court name: "Superior Court of California", decedent name: "Jane Smith", etc.
**Example Output:**
- The Probate Form DE-122/GC-322 filled with the above data, with all relevant checkboxes and fields correctly populated.
"""
class CitationForm(BaseModel):
attorney_name: Optional[str] = Field(None, alias="FillText7")
state_bar_number: Optional[str] = Field(None, alias="FillText9")
attorney_address: Optional[str] = Field(None, alias="FillText10")
court_name: Optional[str] = Field(None, alias="FillText11")
decedent_name: Optional[str] = Field(None, alias="FillText12")
case_number: Optional[str] = Field(None, alias="FillText13")
hearing_date: Optional[str] = Field(None, alias="FillText14")
hearing_time: Optional[str] = Field(None, alias="FillText15")
hearing_dept: Optional[str] = Field(None, alias="FillText16")
hearing_room: Optional[str] = Field(None, alias="FillText17")
court_address: Optional[str] = Field(None, alias="FillText18")
clerk_name: Optional[str] = Field(None, alias="FillText19")
deputy_name: Optional[str] = Field(None, alias="FillText51")
service_person_name: Optional[str] = Field(
None, alias="FillText54"
)
service_person_address: Optional[str] = Field(
None, alias="FillText1"
)
service_person_telephone: Optional[str] = Field(
None, alias="FillText2"
)
service_date: Optional[str] = Field(None, alias="FillText3")
service_time: Optional[str] = Field(None, alias="FillText56")
# Checkboxes for different roles or actions
as_individual: Optional[bool] = Field(False, alias="CheckBox9")
as_person_cited: Optional[bool] = Field(False, alias="CheckBox8")
under_code_civ_proc_416_10: Optional[bool] = Field(
False, alias="CheckBox7"
)
under_code_civ_proc_416_20: Optional[bool] = Field(
False, alias="CheckBox6"
)
under_code_civ_proc_416_40: Optional[bool] = Field(
False, alias="CheckBox5"
)
under_code_civ_proc_416_60: Optional[bool] = Field(
False, alias="CheckBox4"
)
under_code_civ_proc_416_90: Optional[bool] = Field(
False, alias="CheckBox3"
)
# Further information for detailed service
served_by_personal_delivery: Optional[bool] = Field(
False, alias="CheckBox2"
)
served_by_substituted_service: Optional[bool] = Field(
False, alias="CheckBox1"
)
service_other_details: Optional[str] = Field(
None, alias="FillText57"
)
# Additional fields for various types of services
registered_process_server: Optional[bool] = Field(
False, alias="CheckBox55"
)
exempt_from_registration: Optional[bool] = Field(
False, alias="CheckBox56"
)
other_service_type: Optional[str] = Field(
None, alias="FillText85"
)
service_fees: Optional[str] = Field(None, alias="FillText105")
registration_no: Optional[str] = Field(None, alias="FillText104")
county: Optional[str] = Field(None, alias="FillText103")
expiration_date: Optional[str] = Field(None, alias="FillText102")
# Additional checkboxes and fields based on service details
other_details_1: Optional[str] = Field(None, alias="FillText101")
other_details_2: Optional[str] = Field(None, alias="FillText100")
# Other options and related fields
related_case_name: Optional[str] = Field(None, alias="FillText97")
related_case_checkbox: Optional[bool] = Field(
False, alias="CheckBox78"
)
related_case_info: Optional[str] = Field(None, alias="FillText88")
# Fields for declarations and signature
declarant_signature: Optional[str] = Field(
None, alias="FillText91"
)
declarant_date: Optional[str] = Field(None, alias="FillText90")
declarant_location: Optional[str] = Field(
None, alias="FillText89"
)
# Final section for confirmation and acknowledgments
acknowledgement_checkbox: Optional[bool] = Field(
False, alias="CheckBox80"
)
acknowledgement_date: Optional[str] = Field(
None, alias="FillText87"
)
acknowledgement_signatory: Optional[str] = Field(
None, alias="FillText53"
)
acknowledgement_title: Optional[str] = Field(
None, alias="FillText52"
)
# Footer notices
footer_notice_1: Optional[str] = Field(
None, alias="NoticeHeader1"
)
footer_notice_2: Optional[str] = Field(
None, alias="NoticeFooter1"
)
# Reset and submit actions (for informational purposes)
reset_form: Optional[bool] = Field(False, alias="ResetForm")
save_form: Optional[bool] = Field(False, alias="Save")
print_form: Optional[bool] = Field(False, alias="Print")
# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
system_prompt=PROBATE_SYS_PROMPT,
max_tokens=3500,
temperature=0.9,
base_model=CitationForm,
parallel_tool_calls=False,
)
out = model.run(
"Fill out Probate Form DE-122/GC-322 for attorney John Doe, state bar number 123456, Superior Court of California, decedent Jane Smith."
)
print(out)

@ -25,7 +25,9 @@ class HASSchema(BaseModel):
)
swarm_schema = base_model_to_openai_function(HASSchema, output_str=True)
swarm_schema = base_model_to_openai_function(
HASSchema, output_str=True
)
ACCOUNT_MANAGEMENT_SYSTEM_PROMPT = """

@ -79,7 +79,9 @@ summary_agent_output = summary_generator_agent.run(
)
# Provide decision making support to the accountant
decision_making_support_agent_output = decision_making_support_agent.run(
f"{decision_making_support_agent_instructions}:"
f" {summary_agent_output}"
decision_making_support_agent_output = (
decision_making_support_agent.run(
f"{decision_making_support_agent_instructions}:"
f" {summary_agent_output}"
)
)

@ -61,12 +61,16 @@ class ProductAdConceptGenerator:
"in an ice cave setting",
"in a serene and calm landscape",
]
self.contexts = ["high realism product ad (extremely creative)"]
self.contexts = [
"high realism product ad (extremely creative)"
]
def generate_concept(self):
theme = random.choice(self.themes)
context = random.choice(self.contexts)
return f"{theme} inside a {style} {self.product_name}, {context}"
return (
f"{theme} inside a {style} {self.product_name}, {context}"
)
# User input

@ -31,7 +31,9 @@ def test_find_most_similar_podcasts():
graph = create_graph()
weight_edges(graph)
user_list = create_user_list()
most_similar_podcasts = find_most_similar_podcasts(graph, user_list)
most_similar_podcasts = find_most_similar_podcasts(
graph, user_list
)
assert isinstance(most_similar_podcasts, list)

@ -1,7 +1,7 @@
import concurrent
import csv
from swarms import Agent, OpenAIChat
from swarms.memory import ChromaDB
from swarms_memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.file_processing import create_file
@ -45,7 +45,9 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
) as executor:
futures = []
for i, (fn, args, kwargs) in enumerate(callable_functions):
futures.append(executor.submit(worker, fn, args, kwargs, i))
futures.append(
executor.submit(worker, fn, args, kwargs, i)
)
# Wait for all threads to complete
concurrent.futures.wait(futures)
@ -54,7 +56,9 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
# Adjusting the function to extract specific column values
def extract_and_create_agents(csv_file_path: str, target_columns: list):
def extract_and_create_agents(
csv_file_path: str, target_columns: list
):
"""
Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
creates an Agent for each, and adds it to the swarm network.
@ -134,7 +138,8 @@ def extract_and_create_agents(csv_file_path: str, target_columns: list):
# Log the agent
logger.info(
f"Agent created: {agent_name} with long term" " memory"
f"Agent created: {agent_name} with long term"
" memory"
)
agents.append(agent)

@ -16,7 +16,9 @@ def test_pass():
def test_invalid_sports():
assert (
vocal.generate_video("I just ate some delicious tacos", "tacos")
vocal.generate_video(
"I just ate some delicious tacos", "tacos"
)
== "Invalid sports entered!! Please enter a valid sport."
)

@ -55,7 +55,9 @@ class AutoBlogGenSwarm:
):
self.llm = llm()
self.topic_selection_task = topic_selection_task
self.topic_selection_agent_prompt = topic_selection_agent_prompt
self.topic_selection_agent_prompt = (
topic_selection_agent_prompt
)
self.objective = objective
self.iterations = iterations
self.max_retries = max_retries
@ -91,7 +93,9 @@ class AutoBlogGenSwarm:
def step(self):
"""Steps through the task"""
topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
topic_selection_agent = self.llm(
self.topic_selection_agent_prompt
)
topic_selection_agent = self.print_beautifully(
"Topic Selection Agent", topic_selection_agent
)
@ -101,7 +105,9 @@ class AutoBlogGenSwarm:
# Agent that reviews the draft
review_agent = self.llm(self.get_review_prompt(draft_blog))
review_agent = self.print_beautifully("Review Agent", review_agent)
review_agent = self.print_beautifully(
"Review Agent", review_agent
)
# Agent that publishes on social media
distribution_agent = self.llm(

@ -48,7 +48,11 @@ class AutoTemp:
"""
score_text = self.llm(eval_prompt, temperature=0.5)
score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
return round(float(score_match.group()), 1) if score_match else 0.0
return (
round(float(score_match.group()), 1)
if score_match
else 0.0
)
def run(self, prompt, temperature_string):
print("Starting generation process...")

@ -56,11 +56,15 @@ class BlogGen:
)
chosen_topic = topic_output.split("\n")[0]
print(colored("Selected topic: " + chosen_topic, "yellow"))
print(
colored("Selected topic: " + chosen_topic, "yellow")
)
# Initial draft generation with AutoTemp
initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
"{{CHOSEN_TOPIC}}", chosen_topic
initial_draft_prompt = (
self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
"{{CHOSEN_TOPIC}}", chosen_topic
)
)
auto_temp_output = self.auto_temp.run(
initial_draft_prompt, self.temperature_range

@ -449,7 +449,7 @@
"outputs": [],
"source": [
"import os\n",
"from typing import List, Dict\n",
"from typing import List\n",
"\n",
"from swarms import tool\n",
"\n",
@ -514,12 +514,11 @@
"from typing import Callable, List, Optional\n",
"\n",
"import chromadb\n",
"import numpy as np\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms.utils.data_to_text import data_to_text\n",
"from swarms.utils.markdown_message import display_markdown_message\n",
"from swarms.memory.base_vectordb import AbstractVectorDatabase\n",
"from swarms_memory import AbstractVectorDatabase\n",
"\n",
"# Load environment variables\n",
"load_dotenv()\n",

@ -12,7 +12,9 @@ api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")
# Initialize language model
llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
llm = OpenAIChat(
openai_api_key=api_key, temperature=0.5, max_tokens=3000
)
# User preferences (can be dynamically set in a real application)
user_preferences = {
@ -28,7 +30,9 @@ curriculum_prompt = edu_prompts.CURRICULUM_DESIGN_PROMPT.format(
interactive_prompt = edu_prompts.INTERACTIVE_LEARNING_PROMPT.format(
**user_preferences
)
sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(**user_preferences)
sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(
**user_preferences
)
image_prompt = edu_prompts.IMAGE_GENERATION_PROMPT.format(
**user_preferences
)
@ -45,7 +49,9 @@ workflow = SequentialWorkflow(max_loops=1)
# Add tasks to workflow with personalized prompts
workflow.add(curriculum_agent, "Generate a curriculum")
workflow.add(interactive_learning_agent, "Generate an interactive lesson")
workflow.add(
interactive_learning_agent, "Generate an interactive lesson"
)
workflow.add(sample_lesson_agent, "Generate a practice test")
# Execute the workflow for text-based tasks

@ -11,7 +11,9 @@ from swarms.structs import Agent
load_dotenv()
FEATURE = "Implement an all-new signup system in typescript using supabase"
FEATURE = (
"Implement an all-new signup system in typescript using supabase"
)
CODEBASE = """
import React, { useState } from 'react';
@ -66,7 +68,9 @@ feature_implementer_backend = Agent(
)
# Create another agent for a different task
tester_agent = Agent(llm=llm, max_loops=1, sop=TEST_SOP, autosave=True)
tester_agent = Agent(
llm=llm, max_loops=1, sop=TEST_SOP, autosave=True
)
# Create another agent for a different task
documenting_agent = Agent(

@ -389,7 +389,7 @@
" tools=[create_and_execute_swarm],\n",
")\n",
"\n",
"print(f\"Agent generating function schema\")\n",
"print(\"Agent generating function schema\")\n",
"boss_agent_creator.run(\"Create a swarm of agents for automating customer support for an e-commerce platform.\")"
]
},

@ -155,7 +155,9 @@ class JambaSwarm:
names, system_prompts = parse_agents(agents)
# Call the function with parsed data
response = create_and_execute_swarm(names, system_prompts, task)
response = create_and_execute_swarm(
names, system_prompts, task
)
# Create and execute swarm
log = JambaSwarmResponse(

@ -44,7 +44,9 @@ class Idea2Image(Agent):
print(f"Generated image at: {img}")
analysis = (
self.vision_api.run(img, current_prompt) if img else None
self.vision_api.run(img, current_prompt)
if img
else None
)
if analysis:
current_prompt += (
@ -145,7 +147,9 @@ gpt_api = OpenAIChat(openai_api_key=openai_api_key)
# Define the modified Idea2Image class here
# Streamlit UI layout
st.title("Explore the infinite Multi-Modal Idea Space with Idea2Image")
st.title(
"Explore the infinite Multi-Modal Idea Space with Idea2Image"
)
user_prompt = st.text_input("Prompt for image generation:")
num_iterations = st.number_input(
"Enter the number of iterations for image improvement:",
@ -164,7 +168,9 @@ if st.button("Generate Image"):
user_prompt, num_iterations, run_folder
)
for i, (enriched_prompt, img_path, analysis) in enumerate(results):
for i, (enriched_prompt, img_path, analysis) in enumerate(
results
):
st.write(f"Iteration {i+1}:")
st.write("Enriched Prompt:", enriched_prompt)
if img_path:

@ -96,7 +96,9 @@ for _ in range(max_iterations):
# Evaluate the image by passing the file path
score = evaluate_img(llm, task, img_path)
print(
colored(f"Evaluated Image Score: {score} for {img_path}", "cyan")
colored(
f"Evaluated Image Score: {score} for {img_path}", "cyan"
)
)
# Update the best score and image path if necessary

@ -77,7 +77,9 @@ def generate_integrated_shopping_list(
meal_plan_output, image_analysis, user_preferences
):
# Prepare the prompt for the LLM
fridge_contents = image_analysis["choices"][0]["message"]["content"]
fridge_contents = image_analysis["choices"][0]["message"][
"content"
]
prompt = (
f"Based on this meal plan: {meal_plan_output}, and the"
f" following items in the fridge: {fridge_contents},"
@ -129,7 +131,9 @@ print("Integrated Shopping List:", integrated_shopping_list)
with open("nutrition_output.txt", "w") as file:
file.write("Meal Plan:\n" + meal_plan_output + "\n\n")
file.write(
"Integrated Shopping List:\n" + integrated_shopping_list + "\n"
"Integrated Shopping List:\n"
+ integrated_shopping_list
+ "\n"
)
print("Outputs have been saved to nutrition_output.txt")

@ -1,4 +1,4 @@
from swarms.memory import WeaviateDB
from swarms_memory import WeaviateDB
weaviate_client = WeaviateDB(
http_host="YOUR_HTTP_HOST",

@ -42,7 +42,9 @@ def get_review_prompt(article):
return prompt
def social_media_prompt(article: str, goal: str = "Clicks and engagement"):
def social_media_prompt(
article: str, goal: str = "Clicks and engagement"
):
prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
"{{ARTICLE}}", article
).replace("{{GOAL}}", goal)

@ -27,7 +27,9 @@ def cleanup_json_logs(name: str = None):
# Move the file to the target directory
shutil.move(file_path, target_path)
logger.info(f"Moved file {file_path} to {target_path}")
logger.info(
f"Moved file {file_path} to {target_path}"
)
# Delete Chroma and Ruff cache
chroma_cache = os.path.join(root_dir, ".chroma_cache")

@ -24,7 +24,9 @@ async def handle_websocket(websocket, path):
# Broadcast the message to all other users in the public group chats.
for other_websocket in public_group_chats:
if other_websocket != websocket:
await other_websocket.send(f"{username}: {message}")
await other_websocket.send(
f"{username}: {message}"
)
finally:
# Remove the user from the list of public group chats.
public_group_chats.remove(websocket)

@ -48,7 +48,9 @@ def generate_conversation(characters, topic):
# Generate the conversation
conversation = generate_conversation(character_names, conversation_topic)
conversation = generate_conversation(
character_names, conversation_topic
)
# Play the conversation
for line in conversation:

@ -2,7 +2,7 @@ import concurrent
import csv
import os
from swarms import Gemini, Agent
from swarms.memory import ChromaDB
from swarms_memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.file_processing import create_file
@ -48,7 +48,9 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
) as executor:
futures = []
for i, (fn, args, kwargs) in enumerate(callable_functions):
futures.append(executor.submit(worker, fn, args, kwargs, i))
futures.append(
executor.submit(worker, fn, args, kwargs, i)
)
# Wait for all threads to complete
concurrent.futures.wait(futures)
@ -57,7 +59,9 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
# Adjusting the function to extract specific column values
def extract_and_create_agents(csv_file_path: str, target_columns: list):
def extract_and_create_agents(
csv_file_path: str, target_columns: list
):
"""
Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
creates an Agent for each, and adds it to the swarm network.

@ -22,7 +22,9 @@ llm = llama3Hosted(max_tokens=3000)
# Initialize Memory
memory = ChromaDB(output_dir="swarm_mechanic", n_results=2, verbose=True)
memory = ChromaDB(
output_dir="swarm_mechanic", n_results=2, verbose=True
)
# Output

@ -31,7 +31,9 @@ llm = GPT4VisionAPI(openai_api_key=api_key, max_tokens=2000)
assembly_line = (
"examples/demos/swarm_of_mma_manufacturing/assembly_line.jpg"
)
red_robots = "examples/demos/swarm_of_mma_manufacturing/red_robots.jpg"
red_robots = (
"examples/demos/swarm_of_mma_manufacturing/red_robots.jpg"
)
robots = "examples/demos/swarm_of_mma_manufacturing/robots.jpg"
tesla_assembly_line = (
"examples/demos/swarm_of_mma_manufacturing/tesla_assembly.jpg"
@ -125,19 +127,31 @@ health_check = health_security_agent.run(
print(
colored("--------------- Productivity agents initializing...", "green")
colored(
"--------------- Productivity agents initializing...", "green"
)
)
# Add the third task to the productivity_check_agent
productivity_check = productivity_check_agent.run(
health_check, assembly_line
)
print(colored("--------------- Security agents initializing...", "green"))
print(
colored(
"--------------- Security agents initializing...", "green"
)
)
# Add the fourth task to the security_check_agent
security_check = security_check_agent.run(productivity_check, red_robots)
security_check = security_check_agent.run(
productivity_check, red_robots
)
print(colored("--------------- Efficiency agents initializing...", "cyan"))
print(
colored(
"--------------- Efficiency agents initializing...", "cyan"
)
)
# Add the fifth task to the efficiency_check_agent
efficiency_check = efficiency_check_agent.run(
security_check, tesla_assembly_line

@ -12,7 +12,9 @@ api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")
# Initialize language model
llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
llm = OpenAIChat(
openai_api_key=api_key, temperature=0.5, max_tokens=3000
)
# Initialize Vision model
vision_api = GPT4VisionAPI(api_key=api_key)
@ -49,13 +51,17 @@ workflow = SequentialWorkflow(max_loops=1)
# Add tasks to workflow with personalized prompts
workflow.add(architecture_analysis_agent, "Architecture Analysis")
workflow.add(infrastructure_evaluation_agent, "Infrastructure Evaluation")
workflow.add(
infrastructure_evaluation_agent, "Infrastructure Evaluation"
)
workflow.add(traffic_flow_analysis_agent, "Traffic Flow Analysis")
workflow.add(
environmental_impact_assessment_agent,
"Environmental Impact Assessment",
)
workflow.add(public_space_utilization_agent, "Public Space Utilization")
workflow.add(
public_space_utilization_agent, "Public Space Utilization"
)
workflow.add(
socioeconomic_impact_analysis_agent,
"Socioeconomic Impact Analysis",

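Once every analysis agent is registered, the workflow would be kicked off roughly as follows; the task string is illustrative and `run`'s exact signature may differ between versions:

```python
# Illustrative kickoff of the assembled SequentialWorkflow.
results = workflow.run(
    "Assess the proposed riverside redevelopment plan end to end."
)
print(results)
```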
@@ -1,140 +0,0 @@
import uuid
from typing import Any, List, Optional
from sqlalchemy import JSON, Column, String, create_engine
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from swarms.memory.base_vectordb import BaseVectorDatabase
class PostgresDB(BaseVectorDatabase):
"""
A class representing a Postgres database.
Args:
connection_string (str): The connection string for the Postgres database.
table_name (str): The name of the table in the database.
Attributes:
engine: The SQLAlchemy engine for connecting to the database.
table_name (str): The name of the table in the database.
VectorModel: The SQLAlchemy model representing the vector table.
"""
def __init__(
self, connection_string: str, table_name: str, *args, **kwargs
):
"""
Initializes a new instance of the PostgresDB class.
Args:
connection_string (str): The connection string for the Postgres database.
table_name (str): The name of the table in the database.
"""
self.engine = create_engine(connection_string, *args, **kwargs)
self.table_name = table_name
self.VectorModel = self._create_vector_model()
def _create_vector_model(self):
"""
Creates the SQLAlchemy model for the vector table.
Returns:
The SQLAlchemy model representing the vector table.
"""
Base = declarative_base()
class VectorModel(Base):
__tablename__ = self.table_name
id = Column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
unique=True,
nullable=False,
)
vector = Column(
String
) # Assuming vector is stored as a string
namespace = Column(String)
meta = Column(JSON)
return VectorModel
def add(
self,
vector: str,
vector_id: Optional[str] = None,
namespace: Optional[str] = None,
meta: Optional[dict] = None,
) -> None:
"""
Adds or updates a vector in the database.
Args:
vector (str): The vector to be added or updated.
vector_id (str, optional): The ID of the vector. If not provided, a new ID will be generated.
namespace (str, optional): The namespace of the vector.
meta (dict, optional): Additional metadata associated with the vector.
"""
try:
with Session(self.engine) as session:
obj = self.VectorModel(
id=vector_id,
vector=vector,
namespace=namespace,
meta=meta,
)
session.merge(obj)
session.commit()
except Exception as e:
print(f"Error adding or updating vector: {e}")
def query(
self, query: Any, namespace: Optional[str] = None
) -> List[Any]:
"""
Queries vectors from the database based on the given query and namespace.
Args:
query (Any): The query or condition to filter the vectors.
namespace (str, optional): The namespace of the vectors to be queried.
Returns:
List[Any]: A list of vectors that match the query and namespace.
"""
try:
with Session(self.engine) as session:
q = session.query(self.VectorModel)
if namespace:
q = q.filter_by(namespace=namespace)
# Assuming 'query' is a condition or filter
q = q.filter(query)
return q.all()
except Exception as e:
print(f"Error querying vectors: {e}")
return []
def delete_vector(self, vector_id):
"""
Deletes a vector from the database based on the given vector ID.
Args:
vector_id: The ID of the vector to be deleted.
"""
try:
with Session(self.engine) as session:
obj = session.get(self.VectorModel, vector_id)
if obj:
session.delete(obj)
session.commit()
except Exception as e:
print(f"Error deleting vector: {e}")

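For completeness, a usage sketch of the `PostgresDB` class removed above; the connection string is a placeholder and the query condition is just one way to filter on the generated model:

```python
# Placeholder DSN; point this at a real Postgres instance.
db = PostgresDB(
    connection_string="postgresql://user:pass@localhost:5432/swarms",
    table_name="vectors",
)

# Store a vector, serialized as a string per the column type above.
db.add(
    vector="[0.1, 0.2, 0.3]",
    namespace="demo",
    meta={"source": "usage-sketch"},
)

# Filter with any SQLAlchemy condition plus the namespace shortcut.
matches = db.query(db.VectorModel.meta.isnot(None), namespace="demo")
```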
@@ -1,217 +0,0 @@
from typing import Optional
import pinecone
from attr import define, field
from swarms.memory.base_vectordb import BaseVectorDatabase
from swarms.utils import str_to_hash
@define
class PineconeDB(BaseVectorDatabase):
"""
PineconeDB is a vector storage driver that uses Pinecone as the underlying storage engine.
Pinecone is a vector database that allows you to store, search, and retrieve high-dimensional vectors with
blazing speed and low latency. It is a managed service that is easy to use and scales effortlessly, so you can
focus on building your applications instead of managing your infrastructure.
Args:
api_key (str): The API key for your Pinecone account.
index_name (str): The name of the index to use.
environment (str): The environment to use. Either "us-west1-gcp" or "us-east1-gcp".
project_name (str, optional): The name of the project to use. Defaults to None.
index (pinecone.Index, optional): The Pinecone index to use. Defaults to None.
Methods:
add(vector: list[float], vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, **kwargs) -> str:
Upserts a vector into the index.
load_entry(vector_id: str, namespace: Optional[str] = None) -> Optional[BaseVectorStore.Entry]:
Loads a single vector from the index.
load_entries(namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]:
Loads all vectors from the index.
query(query: str, count: Optional[int] = None, namespace: Optional[str] = None, include_vectors: bool = False, include_metadata=True, **kwargs) -> list[BaseVectorStore.QueryResult]:
Queries the index for vectors similar to the given query string.
create_index(name: str, **kwargs) -> None:
Creates a new index.
Usage:
>>> from swarms.memory.vector_stores.pinecone import PineconeDB
>>> from swarms.utils.embeddings import USEEmbedding
>>> from swarms.utils.hash import str_to_hash
>>> from swarms.utils.dataframe import dataframe_to_hash
>>> import pandas as pd
>>>
>>> # Create a new PineconeDB instance:
>>> pv = PineconeDB(
>>> api_key="your-api-key",
>>> index_name="your-index-name",
>>> environment="us-west1-gcp",
>>> project_name="your-project-name"
>>> )
>>> # Create a new index:
>>> pv.create_index("your-index-name")
>>> # Create a new USEEmbedding instance:
>>> use = USEEmbedding()
>>> # Create a new dataframe:
>>> df = pd.DataFrame({
>>> "text": [
>>> "This is a test",
>>> "This is another test",
>>> "This is a third test"
>>> ]
>>> })
>>> # Embed the dataframe:
>>> df["embedding"] = df["text"].apply(use.embed_string)
>>> # Upsert the dataframe into the index:
>>> pv.add(
>>> vector=df["embedding"].tolist(),
>>> vector_id=dataframe_to_hash(df),
>>> namespace="your-namespace"
>>> )
>>> # Query the index:
>>> pv.query(
>>> query="This is a test",
>>> count=10,
>>> namespace="your-namespace"
>>> )
>>> # Load a single entry from the index:
>>> pv.load_entry(
>>> vector_id=dataframe_to_hash(df),
>>> namespace="your-namespace"
>>> )
>>> # Load all entries from the index:
>>> pv.load_entries(
>>> namespace="your-namespace"
>>> )
"""
api_key: str = field(kw_only=True)
index_name: str = field(kw_only=True)
environment: str = field(kw_only=True)
project_name: Optional[str] = field(default=None, kw_only=True)
index: pinecone.Index = field(init=False)
def __attrs_post_init__(self) -> None:
"""Post init"""
pinecone.init(
api_key=self.api_key,
environment=self.environment,
project_name=self.project_name,
)
self.index = pinecone.Index(self.index_name)
def add(
self,
vector: list[float],
vector_id: Optional[str] = None,
namespace: Optional[str] = None,
meta: Optional[dict] = None,
**kwargs,
) -> str:
"""Add a vector to the index.
Args:
vector (list[float]): _description_
vector_id (Optional[str], optional): _description_. Defaults to None.
namespace (Optional[str], optional): _description_. Defaults to None.
meta (Optional[dict], optional): _description_. Defaults to None.
Returns:
str: _description_
"""
vector_id = vector_id if vector_id else str_to_hash(str(vector))
params = {"namespace": namespace} | kwargs
self.index.upsert([(vector_id, vector, meta)], **params)
return vector_id
def load_entries(self, namespace: Optional[str] = None):
"""Load all entries from the index.
Args:
namespace (Optional[str], optional): _description_. Defaults to None.
Returns:
_type_: _description_
"""
# This is a hacky way to query up to 10,000 values from Pinecone. Waiting on an official API for fetching
# all values from a namespace:
# https://community.pinecone.io/t/is-there-a-way-to-query-all-the-vectors-and-or-metadata-from-a-namespace/797/5
results = self.index.query(
self.embedding_driver.embed_string(""),
top_k=10000,
include_metadata=True,
namespace=namespace,
)
        entries = []
        for result in results["matches"]:
            entries.append(
                {
                    "id": result["id"],
                    "vector": result["values"],
                    "meta": result["metadata"],
                    "namespace": result["namespace"],
                }
            )
        return entries
def query(
self,
query: str,
count: Optional[int] = None,
namespace: Optional[str] = None,
include_vectors: bool = False,
# PineconeDBStorageDriver-specific params:
include_metadata=True,
**kwargs,
):
"""Query the index for vectors similar to the given query string.
Args:
query (str): _description_
count (Optional[int], optional): _description_. Defaults to None.
namespace (Optional[str], optional): _description_. Defaults to None.
include_vectors (bool, optional): _description_. Defaults to False.
include_metadata (bool, optional): _description_. Defaults to True.
Returns:
_type_: _description_
"""
vector = self.embedding_driver.embed_string(query)
params = {
"top_k": count,
"namespace": namespace,
"include_values": include_vectors,
"include_metadata": include_metadata,
} | kwargs
results = self.index.query(vector, **params)
        entries = []
        for r in results["matches"]:
            entries.append(
                {
                    "id": r["id"],
                    "vector": r["values"],
                    "score": r["score"],
                    "meta": r["metadata"],
                    "namespace": r["namespace"],
                }
            )
        return entries
def create_index(self, name: str, **kwargs) -> None:
"""Create a new index.
Args:
name (str): The name of the index to create.
"""
params = {
"name": name,
"dimension": self.embedding_driver.dimensions,
} | kwargs
pinecone.create_index(**params)
