{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/evelynmitchell/swarms/blob/master/playground/swarms_example.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cs5RHepmhkEh"
},
"outputs": [],
"source": [
"!pip3 install -U swarms"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "-d9k3egzgp2_"
},
"source": [
"API key"
]
},
{
"cell_type": "code",
"source": [
"import os\n",
"from google.colab import userdata\n",
"os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')\n",
"os.environ['SWARMS_API_KEY'] = userdata.get('SWARMS_API_KEY')\n",
"os.environ['ANTHROPIC_API_KEY'] = userdata.get('ANTHROPIC_API_KEY')\n"
],
"metadata": {
"id": "rs9g9q3qgznb"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!pip install anthropic\n",
"!pip install chromadb"
],
"metadata": {
"id": "ApsuGC_txJn_"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"#Agents"
],
"metadata": {
"id": "nSKDrcQNhlZK"
}
},
{
"cell_type": "markdown",
"metadata": {
"id": "SOpV9-9yfxTy"
},
"source": [
"A basic example of how to use the OpenAI API to generate text."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "1rJYyD5NfxTz"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"# Import the OpenAIChat model and the Agent struct\n",
"from swarms import Agent, OpenAIChat\n",
"\n",
"# Load the environment variables\n",
"load_dotenv()\n",
"\n",
"# Get the API key from the environment\n",
"api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
"\n",
"# Initialize the language model\n",
"llm = OpenAIChat(\n",
" temperature=0.5, openai_api_key=api_key, max_tokens=4000\n",
")\n",
"\n",
"\n",
"## Initialize the workflow\n",
"agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)\n",
"\n",
"# Run the workflow on a task\n",
"agent.run(\"Generate a 10,000 word blog on health and wellness.\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6VtgQ0F4BNc-"
},
"source": [
"Look at the log, which may be empty."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "RqL5LL3xBLWR"
},
"outputs": [],
"source": [
"!cat errors.txt"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "dS9QM954fxT0"
},
"source": [
"**Agent with Long Term Memory**\n",
"\n",
"```Agent``` equipped with quasi-infinite long term memory. Great for long document understanding, analysis, and retrieval."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "1UmemEBBfxT1"
},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from swarms import Agent, OpenAIChat\n",
"from swarms.playground.memory.chromadb_example import ChromaDB\n",
"import logging\n",
"import os\n",
"import uuid\n",
"from typing import Optional\n",
"import chromadb\n",
"from swarms.utils.data_to_text import data_to_text\n",
"from swarms.utils.markdown_message import display_markdown_message\n",
"from swarms.memory.base_vectordb import BaseVectorDatabase\n",
"\n",
"# Load environment variables\n",
"load_dotenv()\n",
"\n",
"\n",
"# Results storage using local ChromaDB\n",
"class ChromaDB(BaseVectorDatabase):\n",
" \"\"\"\n",
"\n",
" ChromaDB database\n",
"\n",
" Args:\n",
" metric (str): The similarity metric to use.\n",
" output (str): The name of the collection to store the results in.\n",
" limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.\n",
" n_results (int, optional): The number of results to retrieve. Defaults to 2.\n",
"\n",
" Methods:\n",
" add: _description_\n",
" query: _description_\n",
"\n",
" Examples:\n",
" >>> chromadb = ChromaDB(\n",
" >>> metric=\"cosine\",\n",
" >>> output=\"results\",\n",
" >>> llm=\"gpt3\",\n",
" >>> openai_api_key=OPENAI_API_KEY,\n",
" >>> )\n",
" >>> chromadb.add(task, result, result_id)\n",
" \"\"\"\n",
"\n",
" def __init__(\n",
" self,\n",
" metric: str = \"cosine\",\n",
" output_dir: str = \"swarms\",\n",
" limit_tokens: Optional[int] = 1000,\n",
" n_results: int = 3,\n",
" docs_folder: str = None,\n",
" verbose: bool = False,\n",
" *args,\n",
" **kwargs,\n",
" ):\n",
" self.metric = metric\n",
" self.output_dir = output_dir\n",
" self.limit_tokens = limit_tokens\n",
" self.n_results = n_results\n",
" self.docs_folder = docs_folder\n",
" self.verbose = verbose\n",
"\n",
" # Disable ChromaDB logging\n",
" if verbose:\n",
" logging.getLogger(\"chromadb\").setLevel(logging.INFO)\n",
"\n",
" # Create Chroma collection\n",
" chroma_persist_dir = \"chroma\"\n",
" chroma_client = chromadb.PersistentClient(\n",
" settings=chromadb.config.Settings(\n",
" persist_directory=chroma_persist_dir,\n",
" ),\n",
" *args,\n",
" **kwargs,\n",
" )\n",
"\n",
" # Create ChromaDB client\n",
" self.client = chromadb.Client()\n",
"\n",
" # Create Chroma collection\n",
" self.collection = chroma_client.get_or_create_collection(\n",
" name=output_dir,\n",
" metadata={\"hnsw:space\": metric},\n",
" *args,\n",
" **kwargs,\n",
" )\n",
" display_markdown_message(\n",
" \"ChromaDB collection created:\"\n",
" f\" {self.collection.name} with metric: {self.metric} and\"\n",
" f\" output directory: {self.output_dir}\"\n",
" )\n",
"\n",
" # If docs\n",
" if docs_folder:\n",
" display_markdown_message(\n",
" f\"Traversing directory: {docs_folder}\"\n",
" )\n",
" self.traverse_directory()\n",
"\n",
" def add(\n",
" self,\n",
" document: str,\n",
" *args,\n",
" **kwargs,\n",
" ):\n",
" \"\"\"\n",
" Add a document to the ChromaDB collection.\n",
"\n",
" Args:\n",
" document (str): The document to be added.\n",
" condition (bool, optional): The condition to check before adding the document. Defaults to True.\n",
"\n",
" Returns:\n",
" str: The ID of the added document.\n",
" \"\"\"\n",
" try:\n",
" doc_id = str(uuid.uuid4())\n",
" self.collection.add(\n",
" ids=[doc_id],\n",
" documents=[document],\n",
" *args,\n",
" **kwargs,\n",
" )\n",
" print(\"-----------------\")\n",
" print(\"Document added successfully\")\n",
" print(\"-----------------\")\n",
" return doc_id\n",
" except Exception as e:\n",
" raise Exception(f\"Failed to add document: {str(e)}\")\n",
"\n",
" def query(\n",
" self,\n",
" query_text: str,\n",
" *args,\n",
" **kwargs,\n",
" ):\n",
" \"\"\"\n",
" Query documents from the ChromaDB collection.\n",
"\n",
" Args:\n",
" query (str): The query string.\n",
" n_docs (int, optional): The number of documents to retrieve. Defaults to 1.\n",
"\n",
" Returns:\n",
" dict: The retrieved documents.\n",
" \"\"\"\n",
" try:\n",
" docs = self.collection.query(\n",
" query_texts=[query_text],\n",
" n_results=self.n_results,\n",
" *args,\n",
" **kwargs,\n",
" )[\"documents\"]\n",
" return docs[0]\n",
" except Exception as e:\n",
" raise Exception(f\"Failed to query documents: {str(e)}\")\n",
"\n",
" def traverse_directory(self):\n",
" \"\"\"\n",
" Traverse through every file in the given directory and its subdirectories,\n",
" and return the paths of all files.\n",
" Parameters:\n",
" - directory_name (str): The name of the directory to traverse.\n",
" Returns:\n",
" - list: A list of paths to each file in the directory and its subdirectories.\n",
" \"\"\"\n",
" added_to_db = False\n",
"\n",
" for root, dirs, files in os.walk(self.docs_folder):\n",
" for file in files:\n",
" file_path = os.path.join(root, file) # Change this line\n",
" _, ext = os.path.splitext(file_path)\n",
" data = data_to_text(file_path)\n",
" added_to_db = self.add(str(data))\n",
" print(f\"{file_path} added to Database\")\n",
"\n",
" return added_to_db\n",
"\n",
"# Get the API key from the environment\n",
"api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
"\n",
"\n",
"# Initilaize the chromadb client\n",
"chromadb = ChromaDB(\n",
" metric=\"cosine\",\n",
" output_dir=\"scp\",\n",
" docs_folder=\"artifacts\",\n",
")\n",
"\n",
"# Initialize the language model\n",
"llm = OpenAIChat(\n",
" temperature=0.5,\n",
" openai_api_key=api_key,\n",
" max_tokens=1000,\n",
")\n",
"\n",
"## Initialize the workflow\n",
"agent = Agent(\n",
" llm=llm,\n",
" name = \"Health and Wellness Blog\",\n",
" system_prompt=\"Generate a 10,000 word blog on health and wellness.\",\n",
" max_loops=4,\n",
" autosave=True,\n",
" dashboard=True,\n",
" long_term_memory=chromadb,\n",
" memory_chunk_size=300,\n",
")\n",
"\n",
"# Run the workflow on a task\n",
"agent.run(\"Generate a 10,000 word blog on health and wellness.\")\n",
"\n",
"\n",
"\n",
"## Initialize the workflow\n",
"agent = Agent(\n",
" llm=llm,\n",
" name = \"Health and Wellness Blog\",\n",
" system_prompt=\"Generate a 10,000 word blog on health and wellness.\",\n",
" max_loops=4,\n",
" autosave=True,\n",
" dashboard=True,\n",
" long_term_memory=chromadb,\n",
" memory_chunk_size=300,\n",
")\n",
"\n",
"# Run the workflow on a task\n",
"agent.run(\"Generate a 10,000 word blog on health and wellness.\")\n",
"\n"
]
},
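{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check, you can query the long-term memory directly. A minimal sketch reusing the `memory` instance created above; the query string is illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Query the ChromaDB memory directly; `query` returns the documents\n",
"# most similar to the query text (see the class definition above).\n",
"docs = memory.query(\"health and wellness\")\n",
"for doc in docs:\n",
"    print(doc[:200])  # preview the first 200 characters of each match\n"
]
},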
{
"cell_type": "markdown",
"metadata": {
"id": "bUGZ93NNfxT1"
},
"source": [
"**```Agent``` with Long Term Memory ++ Tools!**\n",
"An LLM equipped with long term memory and tools, a full stack agent capable of automating all and any digital tasks given a good prompt."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "NbBXS2AqfxT2"
},
"outputs": [],
"source": [
"import logging\n",
"import os\n",
"import uuid\n",
"from typing import Optional\n",
"\n",
"import chromadb\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms.utils.data_to_text import data_to_text\n",
"from swarms.utils.markdown_message import display_markdown_message\n",
"from swarms.memory.base_vectordb import BaseVectorDatabase\n",
"from swarms import Agent, OpenAIChat\n",
"\n",
"\n",
"# Load environment variables\n",
"load_dotenv()\n",
"\n",
"\n",
"\n",
"# Results storage using local ChromaDB\n",
"class ChromaDB(BaseVectorDatabase):\n",
" \"\"\"\n",
"\n",
" ChromaDB database\n",
"\n",
" Args:\n",
" metric (str): The similarity metric to use.\n",
" output (str): The name of the collection to store the results in.\n",
" limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.\n",
" n_results (int, optional): The number of results to retrieve. Defaults to 2.\n",
"\n",
" Methods:\n",
" add: _description_\n",
" query: _description_\n",
"\n",
" Examples:\n",
" >>> chromadb = ChromaDB(\n",
" >>> metric=\"cosine\",\n",
" >>> output=\"results\",\n",
" >>> llm=\"gpt3\",\n",
" >>> openai_api_key=OPENAI_API_KEY,\n",
" >>> )\n",
" >>> chromadb.add(task, result, result_id)\n",
" \"\"\"\n",
"\n",
" def __init__(\n",
" self,\n",
" metric: str = \"cosine\",\n",
" output_dir: str = \"swarms\",\n",
" limit_tokens: Optional[int] = 1000,\n",
" n_results: int = 3,\n",
" docs_folder: str = None,\n",
" verbose: bool = False,\n",
" *args,\n",
" **kwargs,\n",
" ):\n",
" self.metric = metric\n",
" self.output_dir = output_dir\n",
" self.limit_tokens = limit_tokens\n",
" self.n_results = n_results\n",
" self.docs_folder = docs_folder\n",
" self.verbose = verbose\n",
"\n",
" # Disable ChromaDB logging\n",
" if verbose:\n",
" logging.getLogger(\"chromadb\").setLevel(logging.INFO)\n",
"\n",
" # Create Chroma collection\n",
" chroma_persist_dir = \"chroma\"\n",
" chroma_client = chromadb.PersistentClient(\n",
" settings=chromadb.config.Settings(\n",
" persist_directory=chroma_persist_dir,\n",
" ),\n",
" *args,\n",
" **kwargs,\n",
" )\n",
"\n",
" # Create ChromaDB client\n",
" self.client = chromadb.Client()\n",
"\n",
" # Create Chroma collection\n",
" self.collection = chroma_client.get_or_create_collection(\n",
" name=output_dir,\n",
" metadata={\"hnsw:space\": metric},\n",
" *args,\n",
" **kwargs,\n",
" )\n",
" display_markdown_message(\n",
" \"ChromaDB collection created:\"\n",
" f\" {self.collection.name} with metric: {self.metric} and\"\n",
" f\" output directory: {self.output_dir}\"\n",
" )\n",
"\n",
" # If docs\n",
" if docs_folder:\n",
" display_markdown_message(\n",
" f\"Traversing directory: {docs_folder}\"\n",
" )\n",
" self.traverse_directory()\n",
"\n",
" def add(\n",
" self,\n",
" document: str,\n",
" *args,\n",
" **kwargs,\n",
" ):\n",
" \"\"\"\n",
" Add a document to the ChromaDB collection.\n",
"\n",
" Args:\n",
" document (str): The document to be added.\n",
" condition (bool, optional): The condition to check before adding the document. Defaults to True.\n",
"\n",
" Returns:\n",
" str: The ID of the added document.\n",
" \"\"\"\n",
" try:\n",
" doc_id = str(uuid.uuid4())\n",
" self.collection.add(\n",
" ids=[doc_id],\n",
" documents=[document],\n",
" *args,\n",
" **kwargs,\n",
" )\n",
" print(\"-----------------\")\n",
" print(\"Document added successfully\")\n",
" print(\"-----------------\")\n",
" return doc_id\n",
" except Exception as e:\n",
" raise Exception(f\"Failed to add document: {str(e)}\")\n",
"\n",
" def query(\n",
" self,\n",
" query_text: str,\n",
" *args,\n",
" **kwargs,\n",
" ):\n",
" \"\"\"\n",
" Query documents from the ChromaDB collection.\n",
"\n",
" Args:\n",
" query (str): The query string.\n",
" n_docs (int, optional): The number of documents to retrieve. Defaults to 1.\n",
"\n",
" Returns:\n",
" dict: The retrieved documents.\n",
" \"\"\"\n",
" try:\n",
" docs = self.collection.query(\n",
" query_texts=[query_text],\n",
" n_results=self.n_results,\n",
" *args,\n",
" **kwargs,\n",
" )[\"documents\"]\n",
" return docs[0]\n",
" except Exception as e:\n",
" raise Exception(f\"Failed to query documents: {str(e)}\")\n",
"\n",
" def traverse_directory(self):\n",
" \"\"\"\n",
" Traverse through every file in the given directory and its subdirectories,\n",
" and return the paths of all files.\n",
" Parameters:\n",
" - directory_name (str): The name of the directory to traverse.\n",
" Returns:\n",
" - list: A list of paths to each file in the directory and its subdirectories.\n",
" \"\"\"\n",
" added_to_db = False\n",
"\n",
" for root, dirs, files in os.walk(self.docs_folder):\n",
" for file in files:\n",
" file_path = os.path.join(root, file) # Change this line\n",
" _, ext = os.path.splitext(file_path)\n",
" data = data_to_text(file_path)\n",
" added_to_db = self.add(str(data))\n",
" print(f\"{file_path} added to Database\")\n",
"\n",
" return added_to_db\n",
"\n",
"\n",
"# Making an instance of the ChromaDB class\n",
"memory = ChromaDB(\n",
" metric=\"cosine\",\n",
" n_results=3,\n",
" output_dir=\"results\",\n",
" docs_folder=\"docs\",\n",
")\n",
"\n",
"# Initialize a tool\n",
"def search_api(query: str):\n",
" # Add your logic here\n",
" return query\n",
"\n",
"# Initializing the agent with the Gemini instance and other parameters\n",
"agent = Agent(\n",
" agent_name=\"Covid-19-Chat\",\n",
" agent_description=(\n",
" \"This agent provides information about COVID-19 symptoms.\"\n",
" ),\n",
" llm=OpenAIChat(),\n",
" max_loops=\"auto\",\n",
" autosave=True,\n",
" verbose=True,\n",
" long_term_memory=memory,\n",
" stopping_condition=\"finish\",\n",
" tools=[search_api],\n",
")\n",
"\n",
"# Defining the task and image path\n",
"task = (\"What are the symptoms of COVID-19?\",)\n",
"\n",
"# Running the agent with the specified task and image\n",
"out = agent.run(task)\n",
"print(out)\n"
]
},
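{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also add documents to the memory by hand; `add` generates and returns a UUID for each document. A minimal sketch using the `memory` instance defined above; the document text is illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Add a single document to the ChromaDB memory and print its generated ID.\n",
"doc_id = memory.add(\"COVID-19 symptoms include fever, cough, and fatigue.\")\n",
"print(doc_id)\n"
]
},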
{
"cell_type": "markdown",
"source": [
"# Devin"
],
"metadata": {
"id": "PJSxgkkxkxM-"
}
},
{
"cell_type": "code",
"source": [
"from swarms import Agent, Anthropic\n",
"import subprocess\n",
"\n",
"# Model\n",
"llm = Anthropic(\n",
" temperature=0.1,\n",
")\n",
"\n",
"# Tools\n",
"def terminal(\n",
" code: str,\n",
"):\n",
" \"\"\"\n",
" Run code in the terminal.\n",
"\n",
" Args:\n",
" code (str): The code to run in the terminal.\n",
"\n",
" Returns:\n",
" str: The output of the code.\n",
" \"\"\"\n",
" out = subprocess.run(\n",
" code, shell=True, capture_output=True, text=True\n",
" ).stdout\n",
" return str(out)\n",
"\n",
"def browser(query: str):\n",
" \"\"\"\n",
" Search the query in the browser with the `browser` tool.\n",
"\n",
" Args:\n",
" query (str): The query to search in the browser.\n",
"\n",
" Returns:\n",
" str: The search results.\n",
" \"\"\"\n",
" import webbrowser\n",
"\n",
" url = f\"https://www.google.com/search?q={query}\"\n",
" webbrowser.open(url)\n",
" return f\"Searching for {query} in the browser.\"\n",
"\n",
"def create_file(file_path: str, content: str):\n",
" \"\"\"\n",
" Create a file using the file editor tool.\n",
"\n",
" Args:\n",
" file_path (str): The path to the file.\n",
" content (str): The content to write to the file.\n",
"\n",
" Returns:\n",
" str: The result of the file creation operation.\n",
" \"\"\"\n",
" with open(file_path, \"w\") as file:\n",
" file.write(content)\n",
" return f\"File {file_path} created successfully.\"\n",
"\n",
"def file_editor(file_path: str, mode: str, content: str):\n",
" \"\"\"\n",
" Edit a file using the file editor tool.\n",
"\n",
" Args:\n",
" file_path (str): The path to the file.\n",
" mode (str): The mode to open the file in.\n",
" content (str): The content to write to the file.\n",
"\n",
" Returns:\n",
" str: The result of the file editing operation.\n",
" \"\"\"\n",
" with open(file_path, mode) as file:\n",
" file.write(content)\n",
" return f\"File {file_path} edited successfully.\"\n",
"\n",
"\n",
"# Agent\n",
"agent = Agent(\n",
" agent_name=\"Devin\",\n",
" system_prompt=(\n",
" \"Autonomous agent that can interact with humans and other\"\n",
" \" agents. Be Helpful and Kind. Use the tools provided to\"\n",
" \" assist the user. Return all code in markdown format.\"\n",
" ),\n",
" llm=llm,\n",
" max_loops=\"auto\",\n",
" autosave=True,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" interactive=True,\n",
" tools=[terminal, browser, file_editor, create_file],\n",
" code_interpreter=True,\n",
" # streaming=True,\n",
")\n",
"\n",
"# Run the agent\n",
"out = agent(\"Create a new file for a plan to take over the world.\")\n",
"print(out)"
],
"metadata": {
"id": "cGrd4b8JkyPW"
},
"execution_count": null,
"outputs": []
},
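{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since the tools are plain Python functions, you can exercise them directly before handing them to the agent. A minimal check of the `terminal`, `create_file`, and `file_editor` tools defined above; the file name is illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Call each tool directly -- no agent involved -- to verify it behaves as expected.\n",
"print(terminal(\"echo Hello from the terminal tool\"))\n",
"print(create_file(\"plan.txt\", \"A placeholder plan.\"))\n",
"print(file_editor(\"plan.txt\", \"a\", \"\\nAn appended line.\"))\n"
]
},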
{
"cell_type": "markdown",
"source": [
"# Agentwith Pydantic BaseModel as Output Type"
],
"metadata": {
"id": "DWZxqhc3mjki"
}
},
{
"cell_type": "code",
"source": [
"from pydantic import BaseModel, Field\n",
"from swarms import Anthropic, Agent\n",
"\n",
"\n",
"# Initialize the schema for the person's information\n",
"class Schema(BaseModel):\n",
" name: str = Field(..., title=\"Name of the person\")\n",
" agent: int = Field(..., title=\"Age of the person\")\n",
" is_student: bool = Field(..., title=\"Whether the person is a student\")\n",
" courses: list[str] = Field(\n",
" ..., title=\"List of courses the person is taking\"\n",
" )\n",
"\n",
"\n",
"# Convert the schema to a JSON string\n",
"tool_schema = Schema(\n",
" name=\"Tool Name\",\n",
" agent=1,\n",
" is_student=True,\n",
" courses=[\"Course1\", \"Course2\"],\n",
")\n",
"\n",
"# Define the task to generate a person's information\n",
"task = \"Generate a person's information based on the following schema:\"\n",
"\n",
"# Initialize the agent\n",
"agent = Agent(\n",
" agent_name=\"Person Information Generator\",\n",
" system_prompt=(\n",
" \"Generate a person's information based on the following schema:\"\n",
" ),\n",
" # Set the tool schema to the JSON string -- this is the key difference\n",
" tool_schema=tool_schema,\n",
" llm=Anthropic(),\n",
" max_loops=3,\n",
" autosave=True,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" interactive=True,\n",
" # Set the output type to the tool schema which is a BaseModel\n",
" output_type=tool_schema, # or dict, or str\n",
" metadata_output_type=\"json\",\n",
" # List of schemas that the agent can handle\n",
" list_tool_schemas=[tool_schema],\n",
" function_calling_format_type=\"OpenAI\",\n",
" function_calling_type=\"json\", # or soon yaml\n",
")\n",
"\n",
"# Run the agent to generate the person's information\n",
"generated_data = agent.run(task)\n",
"\n",
"# Print the generated data\n",
"print(f\"Generated data: {generated_data}\")\n",
"\n"
],
"metadata": {
"id": "yuhvrtU8mkv9"
},
"execution_count": null,
"outputs": []
},
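{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, you can inspect the JSON form of the schema yourself. A minimal sketch assuming Pydantic v2 (on v1, use `tool_schema.json()` and `Schema.schema_json()` instead)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Serialize the example instance and the schema definition (assumes Pydantic v2).\n",
"print(tool_schema.model_dump_json(indent=2))\n",
"print(Schema.model_json_schema())\n"
]
},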
{
"cell_type": "markdown",
"source": [
"# Multi Modal Autonomous Agent"
],
"metadata": {
"id": "m6AIyiYKmtJp"
}
},
{
"cell_type": "code",
"source": [
"# Description: This is an example of how to use the Agent class to run a multi-modal workflow\n",
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms import GPT4VisionAPI, Agent\n",
"\n",
"# Load the environment variables\n",
"load_dotenv()\n",
"\n",
"# Get the API key from the environment\n",
"api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
"\n",
"# Initialize the language model\n",
"llm = GPT4VisionAPI(\n",
" openai_api_key=api_key,\n",
" max_tokens=500,\n",
")\n",
"\n",
"# Initialize the task\n",
"task = (\n",
" \"Analyze this image of an assembly line and identify any issues such as\"\n",
" \" misaligned parts, defects, or deviations from the standard assembly\"\n",
" \" process. IF there is anything unsafe in the image, explain why it is\"\n",
" \" unsafe and how it could be improved.\"\n",
")\n",
"img = \"assembly_line.jpg\"\n",
"\n",
"## Initialize the workflow\n",
"agent = Agent(\n",
" llm=llm, max_loops=\"auto\", autosave=True, dashboard=True, multi_modal=True\n",
")\n",
"\n",
"# Run the workflow on a task\n",
"agent.run(task=task, img=img)"
],
"metadata": {
"id": "btPwUmYLmue_"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# ToolAgent"
],
"metadata": {
"id": "aax1iI_r54jc"
}
},
{
"cell_type": "code",
"source": [
"from pydantic import BaseModel, Field\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer\n",
"\n",
"from swarms import ToolAgent\n",
"from swarms.utils.json_utils import base_model_to_json\n",
"\n",
"# Load the pre-trained model and tokenizer\n",
"model = AutoModelForCausalLM.from_pretrained(\n",
" \"databricks/dolly-v2-12b\",\n",
" load_in_4bit=True,\n",
" device_map=\"auto\",\n",
")\n",
"tokenizer = AutoTokenizer.from_pretrained(\"databricks/dolly-v2-12b\")\n",
"\n",
"\n",
"# Initialize the schema for the person's information\n",
"class Schema(BaseModel):\n",
" name: str = Field(..., title=\"Name of the person\")\n",
" agent: int = Field(..., title=\"Age of the person\")\n",
" is_student: bool = Field(\n",
" ..., title=\"Whether the person is a student\"\n",
" )\n",
" courses: list[str] = Field(\n",
" ..., title=\"List of courses the person is taking\"\n",
" )\n",
"\n",
"\n",
"# Convert the schema to a JSON string\n",
"tool_schema = base_model_to_json(Schema)\n",
"\n",
"# Define the task to generate a person's information\n",
"task = (\n",
" \"Generate a person's information based on the following schema:\"\n",
")\n",
"\n",
"# Create an instance of the ToolAgent class\n",
"agent = ToolAgent(\n",
" name=\"dolly-function-agent\",\n",
" description=\"Ana gent to create a child data\",\n",
" model=model,\n",
" tokenizer=tokenizer,\n",
" json_schema=tool_schema,\n",
")\n",
"\n",
"# Run the agent to generate the person's information\n",
"generated_data = agent.run(task)\n",
"\n",
"# Print the generated data\n",
"print(f\"Generated data: {generated_data}\")\n"
],
"metadata": {
"id": "SgJ3xZP156o3"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Task"
],
"metadata": {
"id": "DHfO5dtG6XhZ"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms import Agent, OpenAIChat, Task\n",
"\n",
"# Load the environment variables\n",
"load_dotenv()\n",
"\n",
"\n",
"# Define a function to be used as the action\n",
"def my_action():\n",
" print(\"Action executed\")\n",
"\n",
"\n",
"# Define a function to be used as the condition\n",
"def my_condition():\n",
" print(\"Condition checked\")\n",
" return True\n",
"\n",
"\n",
"# Create an agent\n",
"agent = Agent(\n",
" llm=OpenAIChat(openai_api_key=os.environ[\"OPENAI_API_KEY\"]),\n",
" max_loops=1,\n",
" dashboard=False,\n",
")\n",
"\n",
"# Create a task\n",
"task = Task(\n",
" description=(\n",
" \"Generate a report on the top 3 biggest expenses for small\"\n",
" \" businesses and how businesses can save 20%\"\n",
" ),\n",
" agent=agent,\n",
")\n",
"\n",
"# Set the action and condition\n",
"task.set_action(my_action)\n",
"task.set_condition(my_condition)\n",
"\n",
"# Execute the task\n",
"print(\"Executing task...\")\n",
"task.run()\n",
"\n",
"# Check if the task is completed\n",
"if task.is_completed():\n",
" print(\"Task completed\")\n",
"else:\n",
" print(\"Task not completed\")\n",
"\n",
"# Output the result of the task\n",
"print(f\"Task result: {task.result}\")"
],
"metadata": {
"id": "kRIhX_Ze6ZO2"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Multi-Agent Orchestration\n",
"\n",
"## SequentialWorkflow"
],
"metadata": {
"id": "W2i6YLUN6fZg"
}
},
{
"cell_type": "code",
"source": [
"from swarms import Agent, SequentialWorkflow, Anthropic\n",
"\n",
"\n",
"# Initialize the language model agent (e.g., GPT-3)\n",
"llm = Anthropic()\n",
"\n",
"# Initialize agents for individual tasks\n",
"agent1 = Agent(\n",
" agent_name=\"Blog generator\",\n",
" system_prompt=\"Generate a blog post like stephen king\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" tools=[],\n",
")\n",
"agent2 = Agent(\n",
" agent_name=\"summarizer\",\n",
" system_prompt=\"Sumamrize the blog post\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" tools=[],\n",
")\n",
"\n",
"# Create the Sequential workflow\n",
"workflow = SequentialWorkflow(\n",
" agents=[agent1, agent2], max_loops=1, verbose=False\n",
")\n",
"\n",
"# Run the workflow\n",
"workflow.run(\n",
" \"Generate a blog post on how swarms of agents can help businesses grow.\"\n",
")\n"
],
"metadata": {
"id": "Lpqq4SA96jo3"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## ConcurrentWorkflow"
],
"metadata": {
"id": "CF4kbMgb6wGI"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task\n",
"\n",
"# Load environment variables from .env file\n",
"load_dotenv()\n",
"\n",
"# Load environment variables\n",
"llm = OpenAIChat(openai_api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"agent = Agent(llm=llm, max_loops=1)\n",
"\n",
"# Create a workflow\n",
"workflow = ConcurrentWorkflow(max_workers=5)\n",
"\n",
"# Create tasks\n",
"task1 = Task(agent, \"What's the weather in miami\")\n",
"task2 = Task(agent, \"What's the weather in new york\")\n",
"task3 = Task(agent, \"What's the weather in london\")\n",
"\n",
"# Add tasks to the workflow\n",
"workflow.add(tasks=[task1, task2, task3])\n",
"\n",
"# Run the workflow\n",
"workflow.run()"
],
"metadata": {
"id": "Fl2Oambn6z5v"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## RecursiveWorkflow\n"
],
"metadata": {
"id": "Cw0i-5Nn7Q83"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task\n",
"\n",
"# Load environment variables from .env file\n",
"load_dotenv()\n",
"\n",
"# Load environment variables\n",
"llm = OpenAIChat(openai_api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"agent = Agent(llm=llm, max_loops=1)\n",
"\n",
"# Create a workflow\n",
"workflow = RecursiveWorkflow(stop_token=\"<DONE>\")\n",
"\n",
"# Create tasks\n",
"task1 = Task(agent, \"What's the weather in miami\")\n",
"task2 = Task(agent, \"What's the weather in new york\")\n",
"task3 = Task(agent, \"What's the weather in london\")\n",
"\n",
"# Add tasks to the workflow\n",
"workflow.add(task1)\n",
"workflow.add(task2)\n",
"workflow.add(task3)\n",
"\n",
"# Run the workflow\n",
"workflow.run()"
],
"metadata": {
"id": "7Pri4tNL7U-y"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## SwarmNetwork"
],
"metadata": {
"id": "8f-9tgtT7k7D"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"# Import the OpenAIChat model and the Agent struct\n",
"from swarms import Agent, OpenAIChat, SwarmNetwork\n",
"\n",
"# Load the environment variables\n",
"load_dotenv()\n",
"\n",
"# Get the API key from the environment\n",
"api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
"\n",
"# Initialize the language model\n",
"llm = OpenAIChat(\n",
" temperature=0.5,\n",
" openai_api_key=api_key,\n",
")\n",
"\n",
"## Initialize the workflow\n",
"agent = Agent(llm=llm, max_loops=1, agent_name=\"Social Media Manager\")\n",
"agent2 = Agent(llm=llm, max_loops=1, agent_name=\" Product Manager\")\n",
"agent3 = Agent(llm=llm, max_loops=1, agent_name=\"SEO Manager\")\n",
"\n",
"\n",
"# Load the swarmnet with the agents\n",
"swarmnet = SwarmNetwork(\n",
" agents=[agent, agent2, agent3],\n",
")\n",
"\n",
"# List the agents in the swarm network\n",
"out = swarmnet.list_agents()\n",
"print(out)\n",
"\n",
"# Run the workflow on a task\n",
"out = swarmnet.run_single_agent(\n",
" agent2.id, \"Generate a 10,000 word blog on health and wellness.\"\n",
")\n",
"print(out)\n",
"\n",
"\n",
"# Run all the agents in the swarm network on a task\n",
"out = swarmnet.run_many_agents(\"Generate a 10,000 word blog on health and wellness.\")\n",
"print(out)"
],
"metadata": {
"id": "ttbv_Vne7oZj"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Majority Voting"
],
"metadata": {
"id": "U-5p46vW72pS"
}
},
{
"cell_type": "code",
"source": [
"from swarms import Agent, MajorityVoting, ChromaDB, Anthropic\n",
"\n",
"# Initialize the llm\n",
"llm = Anthropic()\n",
"\n",
"# Agents\n",
"agent1 = Agent(\n",
" llm = llm,\n",
" system_prompt=\"You are the leader of the Progressive Party. What is your stance on healthcare?\",\n",
" agent_name=\"Progressive Leader\",\n",
" agent_description=\"Leader of the Progressive Party\",\n",
" long_term_memory=ChromaDB(),\n",
" max_steps=1,\n",
")\n",
"\n",
"agent2 = Agent(\n",
" llm=llm,\n",
" agent_name=\"Conservative Leader\",\n",
" agent_description=\"Leader of the Conservative Party\",\n",
" long_term_memory=ChromaDB(),\n",
" max_steps=1,\n",
")\n",
"\n",
"agent3 = Agent(\n",
" llm=llm,\n",
" agent_name=\"Libertarian Leader\",\n",
" agent_description=\"Leader of the Libertarian Party\",\n",
" long_term_memory=ChromaDB(),\n",
" max_steps=1,\n",
")\n",
"\n",
"# Initialize the majority voting\n",
"mv = MajorityVoting(\n",
" agents=[agent1, agent2, agent3],\n",
" output_parser=llm.majority_voting,\n",
" autosave=False,\n",
" verbose=True,\n",
")\n",
"\n",
"\n",
"# Start the majority voting\n",
"mv.run(\"What is your stance on healthcare?\")"
],
"metadata": {
"id": "ZaRfyNCH773B"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Real-World Deployment\n",
"\n",
"## Multi-Agent Swarm for Logistics"
],
"metadata": {
"id": "-uABoH4Y8Zkh"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms.models import GPT4VisionAPI\n",
"from swarms.prompts.logistics import (\n",
" Efficiency_Agent_Prompt,\n",
" Health_Security_Agent_Prompt,\n",
" Productivity_Agent_Prompt,\n",
" Quality_Control_Agent_Prompt,\n",
" Safety_Agent_Prompt,\n",
" Security_Agent_Prompt,\n",
" Sustainability_Agent_Prompt,\n",
")\n",
"from swarms.structs import Agent\n",
"\n",
"# Load ENV\n",
"load_dotenv()\n",
"api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"# GPT4VisionAPI\n",
"llm = GPT4VisionAPI(openai_api_key=api_key)\n",
"\n",
"# Image for analysis\n",
"factory_image = \"factory_image1.jpg\"\n",
"\n",
"# Initialize agents with respective prompts\n",
"health_security_agent = Agent(\n",
" llm=llm,\n",
" sop=Health_Security_Agent_Prompt,\n",
" max_loops=1,\n",
" multi_modal=True,\n",
")\n",
"\n",
"# Quality control agent\n",
"quality_control_agent = Agent(\n",
" llm=llm,\n",
" sop=Quality_Control_Agent_Prompt,\n",
" max_loops=1,\n",
" multi_modal=True,\n",
")\n",
"\n",
"\n",
"# Productivity Agent\n",
"productivity_agent = Agent(\n",
" llm=llm,\n",
" sop=Productivity_Agent_Prompt,\n",
" max_loops=1,\n",
" multi_modal=True,\n",
")\n",
"\n",
"# Initiailize safety agent\n",
"safety_agent = Agent(llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True)\n",
"\n",
"# Init the security agent\n",
"security_agent = Agent(\n",
" llm=llm, sop=Security_Agent_Prompt, max_loops=1, multi_modal=True\n",
")\n",
"\n",
"\n",
"# Initialize sustainability agent\n",
"sustainability_agent = Agent(\n",
" llm=llm,\n",
" sop=Sustainability_Agent_Prompt,\n",
" max_loops=1,\n",
" multi_modal=True,\n",
")\n",
"\n",
"\n",
"# Initialize efficincy agent\n",
"efficiency_agent = Agent(\n",
" llm=llm,\n",
" sop=Efficiency_Agent_Prompt,\n",
" max_loops=1,\n",
" multi_modal=True,\n",
")\n",
"\n",
"# Run agents with respective tasks on the same image\n",
"health_analysis = health_security_agent.run(\n",
" \"Analyze the safety of this factory\", factory_image\n",
")\n",
"quality_analysis = quality_control_agent.run(\n",
" \"Examine product quality in the factory\", factory_image\n",
")\n",
"productivity_analysis = productivity_agent.run(\n",
" \"Evaluate factory productivity\", factory_image\n",
")\n",
"safety_analysis = safety_agent.run(\n",
" \"Inspect the factory's adherence to safety standards\",\n",
" factory_image,\n",
")\n",
"security_analysis = security_agent.run(\n",
" \"Assess the factory's security measures and systems\",\n",
" factory_image,\n",
")\n",
"sustainability_analysis = sustainability_agent.run(\n",
" \"Examine the factory's sustainability practices\", factory_image\n",
")\n",
"efficiency_analysis = efficiency_agent.run(\n",
" \"Analyze the efficiency of the factory's manufacturing process\",\n",
" factory_image,\n",
")"
],
"metadata": {
"id": "PszQmAoG8gI7"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Build your own LLMs, Agents, and Swarms!\n",
"\n",
"## Swarms Compliant Model Interface"
],
"metadata": {
"id": "2x-lo4hr89E1"
}
},
{
"cell_type": "code",
"source": [
"from swarms import BaseLLM\n",
"\n",
"class vLLMLM(BaseLLM):\n",
" def __init__(self, model_name='default_model', tensor_parallel_size=1, *args, **kwargs):\n",
" super().__init__(*args, **kwargs)\n",
" self.model_name = model_name\n",
" self.tensor_parallel_size = tensor_parallel_size\n",
" # Add any additional initialization here\n",
"\n",
" def run(self, task: str):\n",
" pass\n",
"\n",
"# Example\n",
"model = vLLMLM(\"mistral\")\n",
"\n",
"# Run the model\n",
"out = model(\"Analyze these financial documents and summarize of them\")\n",
"print(out)\n"
],
"metadata": {
"id": "dyaoJKYl9FIp"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Swarms Compliant Agent Interface\n",
"\n",
"This is a non-running example."
],
"metadata": {
"id": "FD1Ro1O59MUF"
}
},
{
"cell_type": "code",
"source": [
"from swarms import Agent\n",
"\n",
"\n",
"class MyCustomAgent(Agent):\n",
"\n",
"    def __init__(self, *args, **kwargs):\n",
"\n",
"        super().__init__(*args, **kwargs)\n",
"\n",
"        # Custom initialization logic\n",
"\n",
"    def custom_method(self, *args, **kwargs):\n",
"\n",
"        # Implement custom logic here\n",
"\n",
"        pass\n",
"\n",
"    def run(self, task, *args, **kwargs):\n",
"\n",
"        # Customize the run method\n",
"\n",
"        response = super().run(task, *args, **kwargs)\n",
"\n",
"        # Additional custom logic\n",
"\n",
"        return response`\n",
"\n",
"# Model\n",
"agent = MyCustomAgent()\n",
"\n",
"# Run the agent\n",
"out = agent(\"Analyze and summarize these financial documents: \")\n",
"print(out)\n"
],
"metadata": {
"id": "tM3P3Uk39Q9y"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Compliant Interface for Multi-Agent Collaboration\n",
"\n",
"This is a non-running example."
],
"metadata": {
"id": "fhbgPU7X9iyB"
}
},
{
"cell_type": "code",
"source": [
"from swarms import AutoSwarm, AutoSwarmRouter, BaseSwarm\n",
"\n",
"\n",
"# Build your own Swarm\n",
"class MySwarm(BaseSwarm):\n",
" def __init__(self, name=\"kyegomez/myswarm\", *args, **kwargs):\n",
" super().__init__(*args, **kwargs)\n",
" self.name = name\n",
"\n",
" def run(self, task: str, *args, **kwargs):\n",
" # Add your multi-agent logic here\n",
" # agent 1\n",
" # agent 2\n",
" # agent 3\n",
" return \"output of the swarm\"\n",
"\n",
"\n",
"# Add your custom swarm to the AutoSwarmRouter\n",
"router = AutoSwarmRouter(\n",
" swarms=[MySwarm]\n",
")\n",
"\n",
"\n",
"# Create an AutoSwarm instance\n",
"autoswarm = AutoSwarm(\n",
" name=\"kyegomez/myswarm\",\n",
" description=\"A simple API to build and run swarms\",\n",
" verbose=True,\n",
" router=router,\n",
")\n",
"\n",
"\n",
"# Run the AutoSwarm\n",
"autoswarm.run(\"Analyze these financial data and give me a summary\")\n",
"\n"
],
"metadata": {
"id": "q_OcaKPo9nnI"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# AgentRearrange"
],
"metadata": {
"id": "XkSjPre39x5S"
}
},
{
"cell_type": "code",
"source": [
"from swarms import Agent, AgentRearrange, rearrange, Anthropic\n",
"\n",
"\n",
"# Initialize the director agent\n",
"\n",
"director = Agent(\n",
" agent_name=\"Director\",\n",
" system_prompt=\"Directs the tasks for the workers\",\n",
" llm=Anthropic(),\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"director.json\",\n",
")\n",
"\n",
"\n",
"# Initialize worker 1\n",
"\n",
"worker1 = Agent(\n",
" agent_name=\"Worker1\",\n",
" system_prompt=\"Generates a transcript for a youtube video on what swarms are\",\n",
" llm=Anthropic(),\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"worker1.json\",\n",
")\n",
"\n",
"\n",
"# Initialize worker 2\n",
"worker2 = Agent(\n",
" agent_name=\"Worker2\",\n",
" system_prompt=\"Summarizes the transcript generated by Worker1\",\n",
" llm=Anthropic(),\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"worker2.json\",\n",
")\n",
"\n",
"\n",
"# Create a list of agents\n",
"agents = [director, worker1, worker2]\n",
"\n",
"# Define the flow pattern\n",
"flow = \"Director -> Worker1 -> Worker2\"\n",
"\n",
"# Using AgentRearrange class\n",
"agent_system = AgentRearrange(agents=agents, flow=flow)\n",
"output = agent_system.run(\n",
" \"Create a format to express and communicate swarms of llms in a structured manner for youtube\"\n",
")\n",
"print(output)\n",
"\n",
"\n",
"# Using rearrange function\n",
"output = rearrange(\n",
" agents,\n",
" flow,\n",
" \"Create a format to express and communicate swarms of llms in a structured manner for youtube\",\n",
")\n",
"\n",
"print(output)\n"
],
"metadata": {
"id": "kCsf1Vwz92Kx"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# HierarhicalSwarm\n",
"\n",
"coming soon"
],
"metadata": {
"id": "yRldmkep-Hui"
}
},
{
"cell_type": "markdown",
"source": [
"## AgentLoadBalancer\n",
"\n",
"Coming soon"
],
"metadata": {
"id": "g-9runeE-N77"
}
},
{
"cell_type": "markdown",
"source": [
"## GraphSwarm\n",
"\n",
"Coming soon"
],
"metadata": {
"id": "4iT48QMq-TS-"
}
},
{
"cell_type": "markdown",
"source": [
"## MixtureOfAgents"
],
"metadata": {
"id": "GzAFomOs-X-G"
}
},
{
"cell_type": "code",
"source": [
"from swarms import Agent, OpenAIChat, MixtureOfAgents\n",
"\n",
"# Initialize the director agent\n",
"director = Agent(\n",
" agent_name=\"Director\",\n",
" system_prompt=\"Directs the tasks for the accountants\",\n",
" llm=OpenAIChat(),\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"director.json\",\n",
")\n",
"\n",
"# Initialize accountant 1\n",
"accountant1 = Agent(\n",
" agent_name=\"Accountant1\",\n",
" system_prompt=\"Prepares financial statements\",\n",
" llm=OpenAIChat(),\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"accountant1.json\",\n",
")\n",
"\n",
"# Initialize accountant 2\n",
"accountant2 = Agent(\n",
" agent_name=\"Accountant2\",\n",
" system_prompt=\"Audits financial records\",\n",
" llm=OpenAIChat(),\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"accountant2.json\",\n",
")\n",
"\n",
"# Create a list of agents\n",
"agents = [director, accountant1, accountant2]\n",
"\n",
"\n",
"# Swarm\n",
"swarm = MixtureOfAgents(\n",
" name=\"Mixture of Accountants\",\n",
" agents=agents,\n",
" layers=3,\n",
" final_agent=director,\n",
")\n",
"\n",
"\n",
"# Run the swarm\n",
"out = swarm.run(\"Prepare financial statements and audit financial records\")\n",
"print(out)"
],
"metadata": {
"id": "Y_jyxKJf-bLe"
},
"execution_count": null,
"outputs": []
}
],
"metadata": {
"colab": {
"gpuType": "T4",
"private_outputs": true,
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}