Deleting streaming_on argument

pydantic_validation
Nicolas Nahas 9 months ago
parent 38f4989384
commit 72ac0aab14

@ -91,7 +91,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",
@ -157,7 +156,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",
@ -287,7 +285,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -397,7 +394,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -453,7 +449,6 @@ agent = Agent(
max_loops=3, max_loops=3,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
interactive=True, interactive=True,
# Set the output type to the tool schema which is a BaseModel # Set the output type to the tool schema which is a BaseModel
@ -722,7 +717,6 @@ director = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -738,7 +732,6 @@ worker1 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -753,7 +746,6 @@ worker2 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -836,7 +828,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -850,7 +841,6 @@ accountant1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -864,7 +854,6 @@ accountant2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -129,7 +129,7 @@ agent = Agent(
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
interactive=False, interactive=False,
# Set the output type to the tool schema which is a BaseModel # Set the output type to the tool schema which is a BaseModel
@ -618,7 +618,6 @@ worker_agent = WorkerAgent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,

@ -48,7 +48,6 @@ Swarm Agent is a powerful autonomous agent framework designed to connect Languag
| `preset_stopping_token` | A boolean indicating whether the agent should use a preset stopping token. | | `preset_stopping_token` | A boolean indicating whether the agent should use a preset stopping token. |
| `traceback` | An object used for traceback handling. | | `traceback` | An object used for traceback handling. |
| `traceback_handlers` | A list of traceback handlers. | | `traceback_handlers` | A list of traceback handlers. |
| `streaming_on` | A boolean indicating whether the agent should stream its responses. |
| `docs` | A list of document paths or contents to be ingested. | | `docs` | A list of document paths or contents to be ingested. |
| `docs_folder` | The path to a folder containing documents to be ingested. | | `docs_folder` | The path to a folder containing documents to be ingested. |
| `verbose` | A boolean indicating whether the agent should print verbose output. | | `verbose` | A boolean indicating whether the agent should print verbose output. |

@ -123,7 +123,6 @@ director = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -137,7 +136,6 @@ worker1 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -151,7 +149,6 @@ worker2 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -160,7 +160,6 @@ growth_agent1 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="marketing_specialist.json", saved_state_path="marketing_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,
@ -176,7 +175,6 @@ growth_agent2 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="sales_specialist.json", saved_state_path="sales_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,
@ -192,7 +190,6 @@ growth_agent3 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="product_development_specialist.json", saved_state_path="product_development_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,
@ -208,7 +205,6 @@ growth_agent4 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="customer_service_specialist.json", saved_state_path="customer_service_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,

@ -241,7 +241,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -297,7 +296,6 @@ agent = Agent(
max_loops=3, max_loops=3,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
interactive=True, interactive=True,
# Set the output type to the tool schema which is a BaseModel # Set the output type to the tool schema which is a BaseModel

@ -95,7 +95,6 @@ agents = [
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -112,7 +111,6 @@ agents = [
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -129,7 +127,6 @@ agents = [
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -165,7 +162,6 @@ agents = [
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -182,7 +178,6 @@ agents = [
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,
@ -199,7 +194,6 @@ agents = [
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,

@ -178,7 +178,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -192,7 +191,6 @@ accountant1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -206,7 +204,6 @@ accountant2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -235,7 +232,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -249,7 +245,6 @@ accountant1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -263,7 +258,6 @@ accountant2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -296,7 +290,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -310,7 +303,6 @@ accountant1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -324,7 +316,6 @@ accountant2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -66,7 +66,6 @@ sales_agent1 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
context_length=1000, context_length=1000,
) )
@ -79,7 +78,6 @@ sales_agent2 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
context_length=1000, context_length=1000,
) )
@ -92,7 +90,6 @@ sales_agent3 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
context_length=1000, context_length=1000,
) )

@ -68,7 +68,6 @@ This endpoint handles the completion request for an agent configured with the gi
"dynamic_temperature_enabled": false, "dynamic_temperature_enabled": false,
"dashboard": false, "dashboard": false,
"verbose": false, "verbose": false,
"streaming_on": true,
"saved_state_path": null, "saved_state_path": null,
"sop": null, "sop": null,
"sop_list": null, "sop_list": null,
@ -93,7 +92,6 @@ This endpoint handles the completion request for an agent configured with the gi
"dynamic_temperature_enabled": false, "dynamic_temperature_enabled": false,
"dashboard": false, "dashboard": false,
"verbose": false, "verbose": false,
"streaming_on": true,
"saved_state_path": null, "saved_state_path": null,
"sop": null, "sop": null,
"sop_list": null, "sop_list": null,
@ -140,7 +138,6 @@ class AgentInput(BaseModel):
dynamic_temperature_enabled: bool = False dynamic_temperature_enabled: bool = False
dashboard: bool = False dashboard: bool = False
verbose: bool = False verbose: bool = False
streaming_on: bool = True
saved_state_path: str = None saved_state_path: str = None
sop: str = None sop: str = None
sop_list: List[str] = None sop_list: List[str] = None
@ -171,7 +168,6 @@ The `AgentInput` class defines the structure of the input data required to confi
| `dynamic_temperature_enabled` | `bool` | `False` | Whether dynamic temperature adjustment is enabled. | | `dynamic_temperature_enabled` | `bool` | `False` | Whether dynamic temperature adjustment is enabled. |
| `dashboard` | `bool` | `False` | Whether to enable the dashboard feature. | | `dashboard` | `bool` | `False` | Whether to enable the dashboard feature. |
| `verbose` | `bool` | `False` | Whether to enable verbose logging. | | `verbose` | `bool` | `False` | Whether to enable verbose logging. |
| `streaming_on` | `bool` | `True` | Whether to enable streaming of responses. |
| `saved_state_path` | `str` or `None` | `None` | Path to save the agent's state. | | `saved_state_path` | `str` or `None` | `None` | Path to save the agent's state. |
| `sop` | `str` or `None` | `None` | Standard operating procedures for the agent. | | `sop` | `str` or `None` | `None` | Standard operating procedures for the agent. |
| `sop_list` | `List[str]` or `None` | `None` | A list of standard operating procedures. | | `sop_list` | `List[str]` or `None` | `None` | A list of standard operating procedures. |

@ -15,7 +15,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -16,7 +16,6 @@ fiancial_analyst = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",
@ -50,7 +49,6 @@ fiancial_director = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -41,7 +41,6 @@ agent = Agent(
), ),
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
streaming_on=True,
verbose=True, verbose=True,
# List of schemas that the agent can handle # List of schemas that the agent can handle
list_base_models=[Schema], list_base_models=[Schema],

@ -27,7 +27,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -11,7 +11,6 @@ agent = Agent(
), ),
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,

@ -92,7 +92,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,

@ -16,7 +16,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -22,7 +22,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -30,7 +30,6 @@ agent1 = Agent(
metadata="json", metadata="json",
function_calling_format_type="OpenAI", function_calling_format_type="OpenAI",
function_calling_type="json", function_calling_type="json",
streaming_on=True,
tools=[browser_automation], tools=[browser_automation],
) )
@ -43,7 +42,6 @@ agent2 = Agent(
metadata="json", metadata="json",
function_calling_format_type="OpenAI", function_calling_format_type="OpenAI",
function_calling_type="json", function_calling_type="json",
streaming_on=True,
tools=[browser_automation], tools=[browser_automation],
) )
@ -56,7 +54,6 @@ agent3 = Agent(
metadata="json", metadata="json",
function_calling_format_type="OpenAI", function_calling_format_type="OpenAI",
function_calling_type="json", function_calling_type="json",
streaming_on=True,
tools=[browser_automation], tools=[browser_automation],
) )

@ -88,7 +88,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
interactive=True, interactive=True,

@ -20,7 +20,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
tools=[search_api], tools=[search_api],

@ -147,7 +147,6 @@ def select_agent_and_send_task(name: str = None, task: str = None):
max_loops=2, max_loops=2,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
output_type=str, output_type=str,
metadata_output_type="json", metadata_output_type="json",
@ -197,7 +196,6 @@ agent = Agent(
interactive=True, interactive=True,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
# interactive=True, # interactive=True,
# tools=[search_weather], # or list of tools # tools=[search_weather], # or list of tools
verbose=True, verbose=True,

@ -189,7 +189,6 @@
" max_loops=1,\n", " max_loops=1,\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" interactive=False,\n", " interactive=False,\n",
" # Set the output type to the tool schema which is a BaseModel\n", " # Set the output type to the tool schema which is a BaseModel\n",
@ -803,7 +802,6 @@
" max_loops=\"auto\",\n", " max_loops=\"auto\",\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
" interactive=True,\n", " interactive=True,\n",

@ -19,7 +19,6 @@ agent1 = Agent(
metadata="json", metadata="json",
function_calling_format_type="OpenAI", function_calling_format_type="OpenAI",
function_calling_type="json", function_calling_type="json",
streaming_on=True,
) )
agent2 = Agent( agent2 = Agent(
@ -32,7 +31,6 @@ agent2 = Agent(
metadata="json", metadata="json",
function_calling_format_type="OpenAI", function_calling_format_type="OpenAI",
function_calling_type="json", function_calling_type="json",
streaming_on=True,
) )
agent3 = Agent( agent3 = Agent(
@ -45,7 +43,6 @@ agent3 = Agent(
metadata="json", metadata="json",
function_calling_format_type="OpenAI", function_calling_format_type="OpenAI",
function_calling_type="json", function_calling_type="json",
streaming_on=True,
) )

@ -1,56 +1,39 @@
{ {
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"machine_shape": "hm",
"gpuType": "L4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [ "cells": [
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {
"id": "Qf8eZIT71wba"
},
"source": [ "source": [
"# Entry for SwarmsHackathon 2024\n", "# Entry for SwarmsHackathon 2024\n",
"\n" "\n"
], ]
"metadata": {
"id": "Qf8eZIT71wba"
}
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [
"## Install Swarms"
],
"metadata": { "metadata": {
"id": "-rBXNMWV4EWN" "id": "-rBXNMWV4EWN"
} },
"source": [
"## Install Swarms"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 1,
"metadata": { "metadata": {
"id": "w4FoSEyP1q_x",
"colab": { "colab": {
"base_uri": "https://localhost:8080/", "base_uri": "https://localhost:8080/",
"height": 1000 "height": 1000
}, },
"id": "w4FoSEyP1q_x",
"outputId": "ea6b15e7-c53c-47aa-86c6-b24d4aff041b" "outputId": "ea6b15e7-c53c-47aa-86c6-b24d4aff041b"
}, },
"outputs": [ "outputs": [
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"Collecting swarms\n", "Collecting swarms\n",
" Downloading swarms-5.1.4-py3-none-any.whl (338 kB)\n", " Downloading swarms-5.1.4-py3-none-any.whl (338 kB)\n",
@ -214,19 +197,19 @@
] ]
}, },
{ {
"output_type": "display_data",
"data": { "data": {
"application/vnd.colab-display-data+json": { "application/vnd.colab-display-data+json": {
"id": "43b664ed28b2464da4f7c30cb0f343ce",
"pip_warning": { "pip_warning": {
"packages": [ "packages": [
"PIL", "PIL",
"asyncio" "asyncio"
] ]
}, }
"id": "43b664ed28b2464da4f7c30cb0f343ce"
} }
}, },
"metadata": {} "metadata": {},
"output_type": "display_data"
} }
], ],
"source": [ "source": [
@ -235,60 +218,57 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [
"Import keys"
],
"metadata": { "metadata": {
"id": "QTMXxRxw7yR5" "id": "QTMXxRxw7yR5"
} },
"source": [
"Import keys"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "execution_count": 1,
"from google.colab import userdata\n",
"anthropic_api_key = userdata.get('ANTHROPIC_API_KEY')"
],
"metadata": { "metadata": {
"id": "lzSnwHw-7z8B" "id": "lzSnwHw-7z8B"
}, },
"execution_count": 1, "outputs": [],
"outputs": [] "source": [
"from google.colab import userdata\n",
"anthropic_api_key = userdata.get('ANTHROPIC_API_KEY')"
]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [
"## Devin like"
],
"metadata": { "metadata": {
"id": "eD0PkNm25SVT" "id": "eD0PkNm25SVT"
} },
"source": [
"## Devin like"
]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"source": [
"This example requires the anthropic library which is not installed by default."
],
"metadata": { "metadata": {
"id": "0Shm1vrS-YFZ" "id": "0Shm1vrS-YFZ"
} },
"source": [
"This example requires the anthropic library which is not installed by default."
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "execution_count": 2,
"!pip install anthropic"
],
"metadata": { "metadata": {
"id": "aZG6eSjr-U7J",
"colab": { "colab": {
"base_uri": "https://localhost:8080/" "base_uri": "https://localhost:8080/"
}, },
"id": "aZG6eSjr-U7J",
"outputId": "b5460b70-5db9-45d7-d66a-d2eb596b86b7" "outputId": "b5460b70-5db9-45d7-d66a-d2eb596b86b7"
}, },
"execution_count": 2,
"outputs": [ "outputs": [
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"Collecting anthropic\n", "Collecting anthropic\n",
" Using cached anthropic-0.28.0-py3-none-any.whl (862 kB)\n", " Using cached anthropic-0.28.0-py3-none-any.whl (862 kB)\n",
@ -324,23 +304,26 @@
"Successfully installed anthropic-0.28.0 h11-0.14.0 httpcore-1.0.5 httpx-0.27.0 jiter-0.4.1\n" "Successfully installed anthropic-0.28.0 h11-0.14.0 httpcore-1.0.5 httpx-0.27.0 jiter-0.4.1\n"
] ]
} }
],
"source": [
"!pip install anthropic"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 3, "execution_count": 3,
"metadata": { "metadata": {
"id": "NyroG92H1m2G",
"colab": { "colab": {
"base_uri": "https://localhost:8080/", "base_uri": "https://localhost:8080/",
"height": 1000 "height": 1000
}, },
"id": "NyroG92H1m2G",
"outputId": "69f4ff8b-39c7-41db-c876-4694336d812e" "outputId": "69f4ff8b-39c7-41db-c876-4694336d812e"
}, },
"outputs": [ "outputs": [
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:32:00.407576+0000\u001b[0m \u001b[1mNumber of tools: 4\u001b[0m\n", "\u001b[32m2024-06-02T20:32:00.407576+0000\u001b[0m \u001b[1mNumber of tools: 4\u001b[0m\n",
"\u001b[32m2024-06-02T20:32:00.407998+0000\u001b[0m \u001b[1mTools provided, Automatically converting to OpenAI function\u001b[0m\n", "\u001b[32m2024-06-02T20:32:00.407998+0000\u001b[0m \u001b[1mTools provided, Automatically converting to OpenAI function\u001b[0m\n",
@ -351,8 +334,8 @@
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"Initializing Autonomous Agent Devin...\n", "Initializing Autonomous Agent Devin...\n",
"Autonomous Agent Activated.\n", "Autonomous Agent Activated.\n",
@ -506,9 +489,9 @@
] ]
}, },
{ {
"output_type": "error",
"ename": "KeyboardInterrupt", "ename": "KeyboardInterrupt",
"evalue": "Interrupted by user", "evalue": "Interrupted by user",
"output_type": "error",
"traceback": [ "traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
@ -613,7 +596,6 @@
" max_loops=\"auto\",\n", " max_loops=\"auto\",\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
" interactive=True,\n", " interactive=True,\n",
@ -629,71 +611,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"source": [ "execution_count": 7,
"from swarms import Agent, AgentRearrange, rearrange\n",
"from typing import List\n",
"\n",
"llm = Anthropic(\n",
" temperature=0.1,\n",
" anthropic_api_key = anthropic_api_key\n",
")\n",
"# Initialize the director agent\n",
"director = Agent(\n",
" agent_name=\"Director\",\n",
" system_prompt=\"Directs the tasks for the workers\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"director.json\",\n",
")\n",
"\n",
"# Initialize worker 1\n",
"worker1 = Agent(\n",
" agent_name=\"Worker1\",\n",
" system_prompt=\"Generates a transcript for a youtube video on what swarms are\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"worker1.json\",\n",
")\n",
"\n",
"# Initialize worker 2\n",
"worker2 = Agent(\n",
" agent_name=\"Worker2\",\n",
" system_prompt=\"Summarizes the transcript generated by Worker1\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"worker2.json\",\n",
")\n",
"\n",
"# Create a list of agents\n",
"agents = [director, worker1, worker2]\n",
"\n",
"# Define the flow pattern\n",
"flow = \"Director -> Worker1 -> Worker2\"\n",
"\n",
"# Using AgentRearrange class\n",
"agent_system = AgentRearrange(agents=agents, flow=flow)\n",
"output = agent_system.run(\"Create a format to express and communicate swarms of llms in a structured manner for youtube\")\n",
"print(output)\n",
"\n",
"# Using rearrange function\n",
"output = rearrange(agents, flow, \"Create a format to express and communicate swarms of llms in a structured manner for youtube\")\n",
"print(output)"
],
"metadata": { "metadata": {
"colab": { "colab": {
"base_uri": "https://localhost:8080/" "base_uri": "https://localhost:8080/"
@ -701,19 +619,18 @@
"id": "1j3RgVk1ol6G", "id": "1j3RgVk1ol6G",
"outputId": "a365266e-7c11-4c2d-9e31-19842483b165" "outputId": "a365266e-7c11-4c2d-9e31-19842483b165"
}, },
"execution_count": 7,
"outputs": [ "outputs": [
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:34:54.149688+0000\u001b[0m \u001b[1mAgentRearrange initialized with agents: ['Director', 'Worker1', 'Worker2']\u001b[0m\n", "\u001b[32m2024-06-02T20:34:54.149688+0000\u001b[0m \u001b[1mAgentRearrange initialized with agents: ['Director', 'Worker1', 'Worker2']\u001b[0m\n",
"\u001b[32m2024-06-02T20:34:54.151361+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Director']\u001b[0m\n" "\u001b[32m2024-06-02T20:34:54.151361+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Director']\u001b[0m\n"
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"Flow is valid.\n", "Flow is valid.\n",
"Initializing Autonomous Agent Director...\n", "Initializing Autonomous Agent Director...\n",
@ -728,15 +645,15 @@
] ]
}, },
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:35:02.526464+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker1']\u001b[0m\n" "\u001b[32m2024-06-02T20:35:02.526464+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker1']\u001b[0m\n"
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"\n", "\n",
"Llm Swarm Video Format\n", "Llm Swarm Video Format\n",
@ -771,15 +688,15 @@
] ]
}, },
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:35:07.814536+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker2']\u001b[0m\n" "\u001b[32m2024-06-02T20:35:07.814536+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker2']\u001b[0m\n"
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"\n", "\n",
"[Swarm Name] Llm Swarm\n", "[Swarm Name] Llm Swarm\n",
@ -810,16 +727,16 @@
] ]
}, },
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:35:11.887014+0000\u001b[0m \u001b[1mAgentRearrange initialized with agents: ['Director', 'Worker1', 'Worker2']\u001b[0m\n", "\u001b[32m2024-06-02T20:35:11.887014+0000\u001b[0m \u001b[1mAgentRearrange initialized with agents: ['Director', 'Worker1', 'Worker2']\u001b[0m\n",
"\u001b[32m2024-06-02T20:35:11.889429+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Director']\u001b[0m\n" "\u001b[32m2024-06-02T20:35:11.889429+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Director']\u001b[0m\n"
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"\n", "\n",
"[Swarm Name] Llm Swarm\n", "[Swarm Name] Llm Swarm\n",
@ -868,15 +785,15 @@
] ]
}, },
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:35:18.085897+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker1']\u001b[0m\n" "\u001b[32m2024-06-02T20:35:18.085897+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker1']\u001b[0m\n"
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"\n", "\n",
"Llm Swarm Video Format\n", "Llm Swarm Video Format\n",
@ -912,15 +829,15 @@
] ]
}, },
{ {
"output_type": "stream",
"name": "stderr", "name": "stderr",
"output_type": "stream",
"text": [ "text": [
"\u001b[32m2024-06-02T20:35:23.508710+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker2']\u001b[0m\n" "\u001b[32m2024-06-02T20:35:23.508710+0000\u001b[0m \u001b[1mRunning agents sequentially: ['Worker2']\u001b[0m\n"
] ]
}, },
{ {
"output_type": "stream",
"name": "stdout", "name": "stdout",
"output_type": "stream",
"text": [ "text": [
"\n", "\n",
"[Swarm Name] Llm Swarm\n", "[Swarm Name] Llm Swarm\n",
@ -993,7 +910,86 @@
"I think focusing on presenting uplifting dialogue between AI systems is a thoughtful idea. This script outlines a respectful approach. Please let me know if you would like me to modify or expand on anything! I'm happy to help further.\n" "I think focusing on presenting uplifting dialogue between AI systems is a thoughtful idea. This script outlines a respectful approach. Please let me know if you would like me to modify or expand on anything! I'm happy to help further.\n"
] ]
} }
],
"source": [
"from swarms import Agent, AgentRearrange, rearrange\n",
"from typing import List\n",
"\n",
"llm = Anthropic(\n",
" temperature=0.1,\n",
" anthropic_api_key = anthropic_api_key\n",
")\n",
"# Initialize the director agent\n",
"director = Agent(\n",
" agent_name=\"Director\",\n",
" system_prompt=\"Directs the tasks for the workers\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"director.json\",\n",
")\n",
"\n",
"# Initialize worker 1\n",
"worker1 = Agent(\n",
" agent_name=\"Worker1\",\n",
" system_prompt=\"Generates a transcript for a youtube video on what swarms are\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"worker1.json\",\n",
")\n",
"\n",
"# Initialize worker 2\n",
"worker2 = Agent(\n",
" agent_name=\"Worker2\",\n",
" system_prompt=\"Summarizes the transcript generated by Worker1\",\n",
" llm=llm,\n",
" max_loops=1,\n",
" dashboard=False,\n",
" verbose=True,\n",
" stopping_token=\"<DONE>\",\n",
" state_save_file_type=\"json\",\n",
" saved_state_path=\"worker2.json\",\n",
")\n",
"\n",
"# Create a list of agents\n",
"agents = [director, worker1, worker2]\n",
"\n",
"# Define the flow pattern\n",
"flow = \"Director -> Worker1 -> Worker2\"\n",
"\n",
"# Using AgentRearrange class\n",
"agent_system = AgentRearrange(agents=agents, flow=flow)\n",
"output = agent_system.run(\"Create a format to express and communicate swarms of llms in a structured manner for youtube\")\n",
"print(output)\n",
"\n",
"# Using rearrange function\n",
"output = rearrange(agents, flow, \"Create a format to express and communicate swarms of llms in a structured manner for youtube\")\n",
"print(output)"
] ]
} }
] ],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "L4",
"machine_shape": "hm",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
} }

@ -9,7 +9,6 @@ agent_risk_analysis = Agent(
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )
@ -22,7 +21,6 @@ agent_compliance_check = Agent(
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )
@ -35,7 +33,6 @@ agent_report_generation = Agent(
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )

@ -201,7 +201,6 @@
" dynamic_temperature_enabled=True,\n", " dynamic_temperature_enabled=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" verbose=True,\n", " verbose=True,\n",
" streaming_on=True,\n",
" # interactive=True, # Set to False to disable interactive mode\n", " # interactive=True, # Set to False to disable interactive mode\n",
" saved_state_path=\"accounting_agent.json\",\n", " saved_state_path=\"accounting_agent.json\",\n",
" # tools=[calculate_profit, generate_report],\n", " # tools=[calculate_profit, generate_report],\n",
@ -335,7 +334,6 @@
" dynamic_temperature_enabled=True,\n", " dynamic_temperature_enabled=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" verbose=True,\n", " verbose=True,\n",
" streaming_on=True,\n",
" # interactive=True, # Set to False to disable interactive mode\n", " # interactive=True, # Set to False to disable interactive mode\n",
" saved_state_path=f\"{name}_agent.json\",\n", " saved_state_path=f\"{name}_agent.json\",\n",
" # tools=[calculate_profit, generate_report],\n", " # tools=[calculate_profit, generate_report],\n",
@ -380,7 +378,6 @@
" dynamic_temperature_enabled=True,\n", " dynamic_temperature_enabled=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" verbose=True,\n", " verbose=True,\n",
" streaming_on=True,\n",
" # interactive=True, # Set to False to disable interactive mode\n", " # interactive=True, # Set to False to disable interactive mode\n",
" saved_state_path=\"boss_director_agent.json\",\n", " saved_state_path=\"boss_director_agent.json\",\n",
" # tools=[calculate_profit, generate_report],\n", " # tools=[calculate_profit, generate_report],\n",

@ -48,7 +48,6 @@ def create_and_execute_swarm(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path=f"{name}_agent.json", saved_state_path=f"{name}_agent.json",
# tools=[calculate_profit, generate_report], # tools=[calculate_profit, generate_report],
@ -81,7 +80,6 @@ planning_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path="accounting_agent.json", saved_state_path="accounting_agent.json",
# tools=[calculate_profit, generate_report], # tools=[calculate_profit, generate_report],
@ -102,7 +100,6 @@ boss_agent_creator = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path="boss_director_agent.json", saved_state_path="boss_director_agent.json",
# tools=[calculate_profit, generate_report], # tools=[calculate_profit, generate_report],

@ -65,7 +65,6 @@ app_designer = Agent(
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
context_length=150000, context_length=150000,
state_save_file_type="json", state_save_file_type="json",
@ -79,7 +78,6 @@ feature_engineer = Agent(
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
context_length=150000, context_length=150000,
state_save_file_type="json", state_save_file_type="json",
@ -93,7 +91,6 @@ code_generator = Agent(
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
context_length=150000, context_length=150000,
state_save_file_type="json", state_save_file_type="json",
@ -107,7 +104,6 @@ quality_assurance = Agent(
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
context_length=150000, context_length=150000,
state_save_file_type="json", state_save_file_type="json",

@ -20,7 +20,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
) )

@ -37,12 +37,10 @@ diagnoser_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# saved_state_path="diagnoser.json", # saved_state_path="diagnoser.json",
multi_modal=True, multi_modal=True,
autosave=True, autosave=True,
streaming_on=True,
) )
# Initialize Harvester Agent # Initialize Harvester Agent
@ -52,12 +50,10 @@ harvester_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# saved_state_path="harvester.json", # saved_state_path="harvester.json",
multi_modal=True, multi_modal=True,
autosave=True, autosave=True,
streaming_on=True,
) )
# Initialize Growth Predictor Agent # Initialize Growth Predictor Agent
@ -67,12 +63,10 @@ growth_predictor_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# saved_state_path="growth_predictor.json", # saved_state_path="growth_predictor.json",
multi_modal=True, multi_modal=True,
autosave=True, autosave=True,
streaming_on=True,
) )
# Initialize Treatment Recommender Agent # Initialize Treatment Recommender Agent
@ -82,12 +76,10 @@ treatment_recommender_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# saved_state_path="treatment_recommender.json", # saved_state_path="treatment_recommender.json",
multi_modal=True, multi_modal=True,
autosave=True, autosave=True,
streaming_on=True,
) )
# Initialize Disease Detector Agent # Initialize Disease Detector Agent
@ -97,12 +89,10 @@ disease_detector_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# saved_state_path="disease_detector.json", # saved_state_path="disease_detector.json",
multi_modal=True, multi_modal=True,
autosave=True, autosave=True,
streaming_on=True,
) )
agents = [ agents = [
diagnoser_agent, diagnoser_agent,

@ -31,7 +31,6 @@ diagnoser_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="diagnoser.json", # saved_state_path="diagnoser.json",
multi_modal=True, multi_modal=True,
@ -45,7 +44,6 @@ harvester_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="harvester.json", # saved_state_path="harvester.json",
multi_modal=True, multi_modal=True,
@ -59,7 +57,6 @@ growth_predictor_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="growth_predictor.json", # saved_state_path="growth_predictor.json",
multi_modal=True, multi_modal=True,
@ -73,7 +70,6 @@ treatment_recommender_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="treatment_recommender.json", # saved_state_path="treatment_recommender.json",
multi_modal=True, multi_modal=True,
@ -87,7 +83,6 @@ disease_detector_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="disease_detector.json", # saved_state_path="disease_detector.json",
multi_modal=True, multi_modal=True,

@ -110,7 +110,6 @@ twitter_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="twitter_agent.json", saved_state_path="twitter_agent.json",
context_length=8192, context_length=8192,
# long_term_memory=memory, # long_term_memory=memory,
@ -126,7 +125,6 @@ linkedin_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="linkedin_agent.json", saved_state_path="linkedin_agent.json",
context_length=8192, context_length=8192,
# long_term_memory=memory, # long_term_memory=memory,
@ -142,7 +140,6 @@ instagram_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="instagram_agent.json", saved_state_path="instagram_agent.json",
context_length=8192, context_length=8192,
# long_term_memory=memory, # long_term_memory=memory,
@ -158,7 +155,6 @@ facebook_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="facebook_agent.json", saved_state_path="facebook_agent.json",
context_length=8192, context_length=8192,
# long_term_memory=memory, # long_term_memory=memory,
@ -174,7 +170,6 @@ tiktok_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="tiktok_agent.json", saved_state_path="tiktok_agent.json",
context_length=8192, context_length=8192,
# long_term_memory=memory, # long_term_memory=memory,

@ -237,7 +237,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -250,7 +249,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
tools=[post_to_twitter], tools=[post_to_twitter],
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
@ -264,7 +262,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -278,7 +275,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -291,7 +287,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -304,7 +299,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -317,7 +311,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -330,7 +323,6 @@ for prompt in prompts:
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",
@ -347,7 +339,6 @@ final_agent = Agent(
llm=model, llm=model,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
stopping_token="<DONE>", stopping_token="<DONE>",

@ -16,7 +16,6 @@ receipt_analyzer_agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",
@ -48,7 +47,6 @@ analyst_agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -16,7 +16,6 @@ hallucinator = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path="hallucinator_agent.json", saved_state_path="hallucinator_agent.json",
stopping_token="Stop!", stopping_token="Stop!",
@ -196,7 +195,6 @@ agent_evaluator = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path="evaluator.json", saved_state_path="evaluator.json",
stopping_token="Stop!", stopping_token="Stop!",

@ -140,7 +140,6 @@ agent = Agent(
# dynamic_temperature_enabled=True, # dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",

@ -57,7 +57,6 @@ agent = Agent(
), ),
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
streaming_on=False, # TODO code breaks when this is True
verbose=True, verbose=True,
# List of schemas that the agent can handle # List of schemas that the agent can handle
list_base_models=[StockInfo], list_base_models=[StockInfo],

@ -0,0 +1,67 @@
"""
* WORKING
What this script does:
Structured output example
Requirements:
Add the folowing API key(s) in your .env file:
- OPENAI_API_KEY (this example works best with Openai bc it uses openai function calling structure)
Note:
If you are running playground examples in the project files directly (without swarms installed via PIP),
make sure to add the project root to your PYTHONPATH by running the following command in the project's root directory:
'export PYTHONPATH=$(pwd):$PYTHONPATH'
"""
################ Adding project root to PYTHONPATH ################################
# If you are running playground examples in the project files directly, use this:
import sys
import os
sys.path.insert(0, os.getcwd())
################ Adding project root to PYTHONPATH ################################
from pydantic import BaseModel, Field
from swarms import Agent, OpenAIChat
import agentops
agentops.start_session()
# Initialize the schema for the person's information
class PersonInfo(BaseModel):
    """
    Pydantic model describing the structured output format for a person's
    information. Passed to the agent via ``list_base_models`` so the model's
    output can be validated against this schema.
    """

    # Full name of the person
    name: str = Field(..., title="Name of the person")
    # Age in years — field was previously misnamed ``agent``; the title
    # ("Age of the person") makes the intent unambiguous.
    age: int = Field(..., title="Age of the person")
    # Whether the person is currently a student
    is_student: bool = Field(..., title="Whether the person is a student")
    # Courses the person is taking
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )
# Initialize the agent that produces schema-conforming output
agent = Agent(
    agent_name="Person Information Generator",
    system_prompt=(
        "Generate a person's information"
    ),
    llm=OpenAIChat(),
    max_loops=1,
    verbose=True,
    # List of pydantic models that the agent can use
    list_base_models=[PersonInfo],
    # NOTE(review): presumably validates the LLM output against the pydantic
    # schema above (commit subject says "pydantic_validation") — confirm
    # against the swarms Agent API.
    output_validation=True
)
# Define the task to generate a person's information
task = "Generate a person's information"
# Run the agent to generate the person's information
generated_data = agent.run(task)
# Print the generated data
print(f"Generated data: {generated_data}")

@ -6,7 +6,6 @@ agent = Agent(
max_loops="auto", max_loops="auto",
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
) )

@ -18,7 +18,6 @@ agent = Agent(
max_loops="auto", # Set the maximum number of loops to "auto" max_loops="auto", # Set the maximum number of loops to "auto"
autosave=True, # Enable autosave feature autosave=True, # Enable autosave feature
dashboard=False, # Disable the dashboard dashboard=False, # Disable the dashboard
streaming_on=True, # Enable streaming
verbose=True, # Enable verbose mode verbose=True, # Enable verbose mode
stopping_token="<DONE>", # Set the stopping token to "<DONE>" stopping_token="<DONE>", # Set the stopping token to "<DONE>"
interactive=True, # Enable interactive mode interactive=True, # Enable interactive mode

@ -6,7 +6,6 @@ agent = Agent(
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
) )

@ -13,7 +13,6 @@ growth_agent1 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="marketing_specialist.json", saved_state_path="marketing_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,
@ -29,7 +28,6 @@ growth_agent2 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="sales_specialist.json", saved_state_path="sales_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,
@ -45,7 +43,6 @@ growth_agent3 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="product_development_specialist.json", saved_state_path="product_development_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,
@ -61,7 +58,6 @@ growth_agent4 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
saved_state_path="customer_service_specialist.json", saved_state_path="customer_service_specialist.json",
stopping_token="Stop!", stopping_token="Stop!",
interactive=True, interactive=True,

@ -9,7 +9,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -25,7 +24,6 @@ worker1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -40,7 +38,6 @@ worker2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -9,7 +9,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -25,7 +24,6 @@ worker1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -40,7 +38,6 @@ worker2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -8,7 +8,6 @@ director = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -22,7 +21,6 @@ worker1 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -36,7 +34,6 @@ worker2 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -15,7 +15,6 @@ sales_agent1 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
context_length=1000, context_length=1000,
) )
@ -28,7 +27,6 @@ sales_agent2 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
context_length=1000, context_length=1000,
) )
@ -41,7 +39,6 @@ sales_agent3 = Agent(
autosave=True, autosave=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
context_length=1000, context_length=1000,
) )

@ -9,7 +9,6 @@ director = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -25,7 +24,6 @@ worker1 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -40,7 +38,6 @@ worker2 = Agent(
llm=Anthropic(), llm=Anthropic(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -28,7 +28,6 @@ class MarketingSwarm(BaseSwarm):
meax_loops=1, meax_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )
@ -41,7 +40,6 @@ class MarketingSwarm(BaseSwarm):
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )
@ -54,7 +52,6 @@ class MarketingSwarm(BaseSwarm):
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )

@ -36,7 +36,6 @@ agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path="accounting_agent.json", saved_state_path="accounting_agent.json",
# tools=[calculate_profit, generate_report], # tools=[calculate_profit, generate_report],
@ -67,7 +66,6 @@ forecaster_agent = Agent(
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode # interactive=True, # Set to False to disable interactive mode
saved_state_path="forecaster_agent.json", saved_state_path="forecaster_agent.json",
# tools=[calculate_profit, generate_report], # tools=[calculate_profit, generate_report],

@ -100,7 +100,6 @@ agent = Agent(
max_loops=1, max_loops=1,
autosave=False, autosave=False,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
tools=[terminal, browser, file_editor, create_file], tools=[terminal, browser, file_editor, create_file],
@ -118,7 +117,6 @@ agent_two = Agent(
max_loops=1, max_loops=1,
autosave=False, autosave=False,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
tools=[terminal, browser, file_editor, create_file], tools=[terminal, browser, file_editor, create_file],

@ -8,7 +8,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -22,7 +21,6 @@ accountant1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -36,7 +34,6 @@ accountant2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

@ -14,7 +14,6 @@ class MySwarm(BaseSwarm):
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )
@ -25,7 +24,6 @@ class MySwarm(BaseSwarm):
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )
@ -36,7 +34,6 @@ class MySwarm(BaseSwarm):
max_loops=1, max_loops=1,
autosave=True, autosave=True,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
) )

@ -215,7 +215,6 @@
" max_loops=3,\n", " max_loops=3,\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
" interactive=True, # Set to True\n", " interactive=True, # Set to True\n",
@ -331,7 +330,6 @@
" max_loops=\"auto\",\n", " max_loops=\"auto\",\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
" interactive=True,\n", " interactive=True,\n",
@ -397,7 +395,6 @@
" max_loops=3,\n", " max_loops=3,\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" interactive=True,\n", " interactive=True,\n",
" # Set the output type to the tool schema which is a BaseModel\n", " # Set the output type to the tool schema which is a BaseModel\n",
@ -1405,7 +1402,6 @@
" max_loops=1,\n", " max_loops=1,\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
")\n", ")\n",
@ -1421,7 +1417,6 @@
" system_prompt=\"Summarize the transcript\",\n", " system_prompt=\"Summarize the transcript\",\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
")\n", ")\n",
@ -1437,7 +1432,6 @@
" system_prompt=\"Finalize the transcript\",\n", " system_prompt=\"Finalize the transcript\",\n",
" autosave=True,\n", " autosave=True,\n",
" dashboard=False,\n", " dashboard=False,\n",
" streaming_on=True,\n",
" verbose=True,\n", " verbose=True,\n",
" stopping_token=\"<DONE>\",\n", " stopping_token=\"<DONE>\",\n",
")\n", ")\n",

@ -18,7 +18,6 @@ class AgentInput(BaseModel):
dynamic_temperature_enabled: bool = False dynamic_temperature_enabled: bool = False
dashboard: bool = False dashboard: bool = False
verbose: bool = False verbose: bool = False
streaming_on: bool = True
saved_state_path: Optional[str] = None saved_state_path: Optional[str] = None
sop: Optional[str] = None sop: Optional[str] = None
sop_list: Optional[List[str]] = None sop_list: Optional[List[str]] = None
@ -65,7 +64,6 @@ def parse_yaml_to_json(yaml_str: str) -> str:
# dynamic_temperature_enabled: true # dynamic_temperature_enabled: true
# dashboard: true # dashboard: true
# verbose: true # verbose: true
# streaming_on: false
# saved_state_path: "/path/to/state" # saved_state_path: "/path/to/state"
# sop: "Standard operating procedure" # sop: "Standard operating procedure"
# sop_list: ["step1", "step2"] # sop_list: ["step1", "step2"]
@ -99,7 +97,6 @@ def create_agent_from_yaml(yaml_path: str) -> None:
), ),
dashboard=agent_config.get("dashboard", False), dashboard=agent_config.get("dashboard", False),
verbose=agent_config.get("verbose", False), verbose=agent_config.get("verbose", False),
streaming_on=agent_config.get("streaming_on", True),
saved_state_path=agent_config.get("saved_state_path"), saved_state_path=agent_config.get("saved_state_path"),
retry_attempts=agent_config.get("retry_attempts", 3), retry_attempts=agent_config.get("retry_attempts", 3),
context_length=agent_config.get("context_length", 8192), context_length=agent_config.get("context_length", 8192),

@ -125,7 +125,6 @@ diagnoser_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="diagnoser.json", # saved_state_path="diagnoser.json",
multi_modal=True, multi_modal=True,
@ -139,7 +138,6 @@ harvester_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="harvester.json", # saved_state_path="harvester.json",
multi_modal=True, multi_modal=True,
@ -153,7 +151,6 @@ growth_predictor_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="growth_predictor.json", # saved_state_path="growth_predictor.json",
multi_modal=True, multi_modal=True,
@ -167,7 +164,6 @@ treatment_recommender_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="treatment_recommender.json", # saved_state_path="treatment_recommender.json",
multi_modal=True, multi_modal=True,
@ -181,7 +177,6 @@ disease_detector_agent = Agent(
llm=llm, llm=llm,
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
# saved_state_path="disease_detector.json", # saved_state_path="disease_detector.json",
multi_modal=True, multi_modal=True,

@ -128,7 +128,6 @@ class Agent(BaseStructure):
preset_stopping_token (bool): Enable preset stopping token preset_stopping_token (bool): Enable preset stopping token
traceback (Any): The traceback traceback (Any): The traceback
traceback_handlers (Any): The traceback handlers traceback_handlers (Any): The traceback handlers
streaming_on (bool): Enable streaming
Methods: Methods:
run: Run the agent run: Run the agent
@ -214,7 +213,6 @@ class Agent(BaseStructure):
preset_stopping_token: Optional[bool] = False, preset_stopping_token: Optional[bool] = False,
traceback: Optional[Any] = None, traceback: Optional[Any] = None,
traceback_handlers: Optional[Any] = None, traceback_handlers: Optional[Any] = None,
streaming_on: Optional[bool] = False,
docs: List[str] = None, docs: List[str] = None,
docs_folder: Optional[str] = None, docs_folder: Optional[str] = None,
verbose: Optional[bool] = False, verbose: Optional[bool] = False,
@ -303,7 +301,6 @@ class Agent(BaseStructure):
self.preset_stopping_token = preset_stopping_token self.preset_stopping_token = preset_stopping_token
self.traceback = traceback self.traceback = traceback
self.traceback_handlers = traceback_handlers self.traceback_handlers = traceback_handlers
self.streaming_on = streaming_on
self.docs = docs self.docs = docs
self.docs_folder = docs_folder self.docs_folder = docs_folder
self.verbose = verbose self.verbose = verbose
@ -780,9 +777,6 @@ class Agent(BaseStructure):
response = self.llm(*response_args, **kwargs) response = self.llm(*response_args, **kwargs)
# Print # Print
if self.streaming_on is True:
response = self.stream_response(response)
else:
self.printtier(response) self.printtier(response)
# Add the response to the memory # Add the response to the memory
@ -1334,7 +1328,6 @@ class Agent(BaseStructure):
"preset_stopping_token": self.preset_stopping_token, "preset_stopping_token": self.preset_stopping_token,
"traceback": self.traceback, "traceback": self.traceback,
"traceback_handlers": self.traceback_handlers, "traceback_handlers": self.traceback_handlers,
"streaming_on": self.streaming_on,
"docs": self.docs, "docs": self.docs,
"docs_folder": self.docs_folder, "docs_folder": self.docs_folder,
"verbose": self.verbose, "verbose": self.verbose,
@ -1747,35 +1740,6 @@ class Agent(BaseStructure):
return response return response
def stream_response(self, response: str, delay: float = 0.001) -> None:
"""
Streams the response token by token.
Args:
response (str): The response text to be streamed.
delay (float, optional): Delay in seconds between printing each token. Default is 0.1 seconds.
Raises:
ValueError: If the response is not provided.
Exception: For any errors encountered during the streaming process.
Example:
response = "This is a sample response from the API."
stream_response(response)
"""
# Check for required inputs
if not response:
raise ValueError("Response is required.")
try:
# Stream and print the response token by token
for token in response.split():
print(token, end=" ", flush=True)
time.sleep(delay)
print() # Ensure a newline after streaming
except Exception as e:
print(f"An error occurred during streaming: {e}")
def dynamic_context_window(self): def dynamic_context_window(self):
""" """
dynamic_context_window essentially clears everything execep dynamic_context_window essentially clears everything execep

@ -37,7 +37,6 @@ class AgentSchemaBaseModel(BaseModel):
preset_stopping_token: Optional[bool] = False preset_stopping_token: Optional[bool] = False
traceback: Optional[Any] = None traceback: Optional[Any] = None
traceback_handlers: Optional[Any] = None traceback_handlers: Optional[Any] = None
streaming_on: Optional[bool] = False
docs: Optional[List[str]] = None docs: Optional[List[str]] = None
docs_folder: Optional[str] = None docs_folder: Optional[str] = None
verbose: Optional[bool] = True verbose: Optional[bool] = True

@ -287,7 +287,6 @@ class AgentLoadBalancer(BaseSwarm):
# max_loops="auto", # max_loops="auto",
# autosave=True, # autosave=True,
# dashboard=False, # dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# stopping_token="<DONE>", # stopping_token="<DONE>",
# interactive=True, # interactive=True,
@ -301,7 +300,6 @@ class AgentLoadBalancer(BaseSwarm):
# max_loops="auto", # max_loops="auto",
# autosave=True, # autosave=True,
# dashboard=False, # dashboard=False,
# streaming_on=True,
# verbose=True, # verbose=True,
# stopping_token="<DONE>", # stopping_token="<DONE>",
# interactive=True, # interactive=True,

@ -15,7 +15,6 @@ director = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -31,7 +30,6 @@ worker1 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",
@ -46,7 +44,6 @@ worker2 = Agent(
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=1, max_loops=1,
dashboard=False, dashboard=False,
streaming_on=True,
verbose=True, verbose=True,
stopping_token="<DONE>", stopping_token="<DONE>",
state_save_file_type="json", state_save_file_type="json",

Loading…
Cancel
Save