From 243bda2efa865813805070c55e8bd19689259693 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Sun, 9 Jun 2024 03:24:00 +0900 Subject: [PATCH 01/13] docs: update README.md faciliate -> facilitate --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 13f69301..51681087 100644 --- a/README.md +++ b/README.md @@ -532,7 +532,7 @@ print(f"Task result: {task.result}") # Multi-Agent Orchestration: -Swarms was designed to faciliate the communication between many different and specialized agents from a vast array of other frameworks such as langchain, autogen, crew, and more. +Swarms was designed to facilitate the communication between many different and specialized agents from a vast array of other frameworks such as langchain, autogen, crew, and more. In traditional swarm theory, there are many types of swarms usually for very specialized use-cases and problem sets. Such as Hiearchical and sequential are great for accounting and sales, because there is usually a boss coordinator agent that distributes a workload to other specialized agents. 
From f2ac193e3fd67723917a31b3a4242671b3d4c77e Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 11:49:01 -0700 Subject: [PATCH 02/13] [demo] --- docs/mkdocs.yml | 23 +- .../structs/multi_agent_orchestration.md | 15 + playground/demos/octomology_swarm/api.py | 115 +- playground/weatherman_agent/.env.example | 5 + playground/weatherman_agent/.gitignore | 204 +++ playground/weatherman_agent/README.md | 112 ++ playground/weatherman_agent/api.py | 119 ++ .../weatherman_agent/datasets/examples.csv | 40 + .../datasets/rain_weather_prompts.csv | 26 + .../datasets/weatherman_agent_LLM_prompts.csv | 26 + .../weatherman_agent/docs/llama3_hosted.md | 112 ++ .../weatherman_agent/docs/weather_agent.md | 113 ++ .../examples/baron_tool_with_swarms_tool.py | 30 + .../examples/llama_3_hosted_swarms.py | 19 + .../weatherman_agent/examples/llama_agent.py | 34 + .../weatherman_agent/examples/tool_schemas.py | 35 + playground/weatherman_agent/pyproject.toml | 55 + playground/weatherman_agent/requirements.txt | 18 + .../weatherman_agent/scripts/Dockerfile | 28 + playground/weatherman_agent/scripts/setup.sh | 0 .../tests/test_baron_tools.py | 56 + .../weatherman_agent/tests/test_llama3.py | 41 + .../tests/tests_weather_agent.py | 161 +++ .../weatherman_agent/todo/director_agent.py | 279 ++++ .../weatherman_agent/todo/worker_agents.py | 269 ++++ playground/weatherman_agent/weather_agent.py | 50 + .../weather_swarm/__init__.py | 0 .../weatherman_agent/weather_swarm/prompts.py | 152 ++ .../weather_swarm/tools/__init__.py | 0 .../weather_swarm/tools/baron_tools_schema.py | 145 ++ .../tools/get_geo_coordinates.py | 109 ++ .../weather_swarm/tools/tools.py | 1281 +++++++++++++++++ swarms/structs/concat.py | 24 + swarms/structs/mixture_of_agents.py | 159 ++ 34 files changed, 3784 insertions(+), 71 deletions(-) create mode 100644 docs/swarms/structs/multi_agent_orchestration.md create mode 100644 playground/weatherman_agent/.env.example create mode 100644 
playground/weatherman_agent/.gitignore create mode 100644 playground/weatherman_agent/README.md create mode 100644 playground/weatherman_agent/api.py create mode 100644 playground/weatherman_agent/datasets/examples.csv create mode 100644 playground/weatherman_agent/datasets/rain_weather_prompts.csv create mode 100644 playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv create mode 100644 playground/weatherman_agent/docs/llama3_hosted.md create mode 100644 playground/weatherman_agent/docs/weather_agent.md create mode 100644 playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py create mode 100644 playground/weatherman_agent/examples/llama_3_hosted_swarms.py create mode 100644 playground/weatherman_agent/examples/llama_agent.py create mode 100644 playground/weatherman_agent/examples/tool_schemas.py create mode 100644 playground/weatherman_agent/pyproject.toml create mode 100644 playground/weatherman_agent/requirements.txt create mode 100644 playground/weatherman_agent/scripts/Dockerfile create mode 100644 playground/weatherman_agent/scripts/setup.sh create mode 100644 playground/weatherman_agent/tests/test_baron_tools.py create mode 100644 playground/weatherman_agent/tests/test_llama3.py create mode 100644 playground/weatherman_agent/tests/tests_weather_agent.py create mode 100644 playground/weatherman_agent/todo/director_agent.py create mode 100644 playground/weatherman_agent/todo/worker_agents.py create mode 100644 playground/weatherman_agent/weather_agent.py create mode 100644 playground/weatherman_agent/weather_swarm/__init__.py create mode 100644 playground/weatherman_agent/weather_swarm/prompts.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/__init__.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/tools.py create 
mode 100644 swarms/structs/concat.py create mode 100644 swarms/structs/mixture_of_agents.py diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 18c6a0a8..3afa0044 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -1,4 +1,3 @@ - docs_dir: '.' # replace with the correct path if your documentation files are not in the same directory as mkdocs.yml site_name: Swarms Documentation site_url: https://swarms.apac.ai @@ -90,10 +89,22 @@ markdown_extensions: - footnotes nav: - Home: - - Overview: "index.md" - - Install: "swarms/install/install.md" - - Docker Setup: "swarms/install/docker_setup.md" + - Overview: "index.md" + - Install: "swarms/install/install.md" + - Docker Setup: "swarms/install/docker_setup.md" + - Contributing: "contributing.md" - Framework: + - Overview: "swarms/" + - Models: "swarms/models/index.md" + - Agents: + - Build Agents: "swarms/structs/diy_your_own_agent.md" + - Agents with Memory: "swarms/memory/diy_memory.md" + - Agents with tools: "swarms/tools/main.md" + # - Integrating Agents from Langchain, CrewAI, and Autogen: "swarms" + - Multi-Agent Collaboration: + - Overview: "swarms/structs/multi_agent_orchestration.md" + - Workflows: "swarms/structs/workflows.md" + - Reference: - Overview: "swarms/index.md" - Models: - How to Create A Custom Language Model: "swarms/models/custom_model.md" @@ -157,6 +168,4 @@ nav: - SequentialWorkflow: "examples/reliable_autonomous_agents.md" - References: - Agent Glossary: "swarms/glossary.md" - - List of The Best Multi-Agent Papers: "swarms/papers.md" - - Contributors: - - Contributing: "contributing.md" + - List of The Best Multi-Agent Papers: "swarms/papers.md" \ No newline at end of file diff --git a/docs/swarms/structs/multi_agent_orchestration.md b/docs/swarms/structs/multi_agent_orchestration.md new file mode 100644 index 00000000..80dedff3 --- /dev/null +++ b/docs/swarms/structs/multi_agent_orchestration.md @@ -0,0 +1,15 @@ +# Multi-Agent Orchestration: +Swarms was designed to faciliate the communication 
between many different and specialized agents from a vast array of other frameworks such as langchain, autogen, crew, and more. + +In traditional swarm theory, there are many types of swarms usually for very specialized use-cases and problem sets. Such as Hiearchical and sequential are great for accounting and sales, because there is usually a boss coordinator agent that distributes a workload to other specialized agents. + + +| **Name** | **Description** | **Code Link** | **Use Cases** | +|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------|---------------------------------------------------------------------------------------------------| +| Hierarchical Swarms | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. | [Code Link](#) | Manufacturing process optimization, multi-level sales management, healthcare resource coordination | +| Agent Rearrange | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing | +| Concurrent Workflows | Agents perform different tasks simultaneously, coordinating to complete a larger goal. | [Code Link](#) | Concurrent production lines, parallel sales operations, simultaneous patient care processes | +| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next. 
| [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows | +| Parallel Processing | Agents work on different parts of a task simultaneously to speed up the overall process. | [Code Link](#) | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests | + + diff --git a/playground/demos/octomology_swarm/api.py b/playground/demos/octomology_swarm/api.py index d826b4e4..203ba051 100644 --- a/playground/demos/octomology_swarm/api.py +++ b/playground/demos/octomology_swarm/api.py @@ -1,13 +1,11 @@ import os from dotenv import load_dotenv -from fastapi.responses import JSONResponse from pydantic import BaseModel, Field from swarms import Agent from swarms.models import OpenAIChat from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.structs.rearrange import AgentRearrange -from fastapi import FastAPI from typing import Optional, List, Dict, Any # Load the environment variables @@ -25,7 +23,7 @@ openai = OpenAIChat( # Setup the FastAPI app -app = FastAPI() +# app = FastAPI() def DIAGNOSIS_SYSTEM_PROMPT() -> str: @@ -120,64 +118,53 @@ class RunConfig(BaseModel): max_loops: Optional[int] = 1 -@app.get("/v1/health") -async def health_check(): - return JSONResponse(content={"status": "healthy"}) - - -@app.get("/v1/models_available") -async def models_available(): - available_models = { - "models": [ - {"name": "gpt-4-1106-vision-preview", "type": "vision"}, - {"name": "openai-chat", "type": "text"}, - ] - } - return JSONResponse(content=available_models) - - -@app.get("/v1/swarm/completions") -async def run_agents(run_config: RunConfig): - # Diagnoser agent - diagnoser = Agent( - # agent_name="Medical Image Diagnostic Agent", - agent_name="D", - system_prompt=DIAGNOSIS_SYSTEM_PROMPT(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=True, - ) - - # Agent 2 the treatment plan provider - 
treatment_plan_provider = Agent( - # agent_name="Medical Treatment Recommendation Agent", - agent_name="T", - system_prompt=TREATMENT_PLAN_SYSTEM_PROMPT(), - llm=openai, - max_loops=1, - autosave=True, - dashboard=True, - ) - - # Agent 3 the re-arranger - rearranger = AgentRearrange( - agents=[diagnoser, treatment_plan_provider], - flow=run_config.flow, - max_loops=run_config.max_loops, - verbose=True, - ) - - # Run the rearranger - out = rearranger( - run_config.task, - image=run_config.image, - ) - - return JSONResponse(content=out) - - -if __name__ == "__main__": - import uvicorn - - uvicorn.run(app, host="0.0.0.0", port=8000) +# @app.get("/v1/health") +# async def health_check(): +# return JSONResponse(content={"status": "healthy"}) + + +# @app.get("/v1/models_available") +# async def models_available(): +# available_models = { +# "models": [ +# {"name": "gpt-4-1106-vision-preview", "type": "vision"}, +# {"name": "openai-chat", "type": "text"}, +# ] +# } +# return JSONResponse(content=available_models) + + +# @app.get("/v1/swarm/completions") +# async def run_agents(run_config: RunConfig): +# Diagnoser agent +diagnoser = Agent( + # agent_name="Medical Image Diagnostic Agent", + agent_name="D", + system_prompt=DIAGNOSIS_SYSTEM_PROMPT(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=True, +) + +# Agent 2 the treatment plan provider +treatment_plan_provider = Agent( + # agent_name="Medical Treatment Recommendation Agent", + agent_name="T", + system_prompt=TREATMENT_PLAN_SYSTEM_PROMPT(), + llm=openai, + max_loops=1, + autosave=True, + dashboard=True, +) + +# Agent 3 the re-arranger +rearranger = AgentRearrange( + agents=[diagnoser, treatment_plan_provider], + flow="D -> T", + max_loops=1, + verbose=True, +) + +# Run the agents +results = rearranger.run("") diff --git a/playground/weatherman_agent/.env.example b/playground/weatherman_agent/.env.example new file mode 100644 index 00000000..0fa1a6b7 --- /dev/null +++ b/playground/weatherman_agent/.env.example 
@@ -0,0 +1,5 @@ +ANTHROPIC_API_KEY="your-anthropic-api-key" +SWARMS_API_KEY="GET YOUR KEY AT https://swarms.world/account" +BARON_API_HOST="http://api.velocityweather.com/v1" +BARON_ACCESS_KEY="your-baron-access-key" +BARON_ACCESS_KEY_SECRET="your-baron-access-key-secret" \ No newline at end of file diff --git a/playground/weatherman_agent/.gitignore new file mode 100644 index 00000000..97476ea2 --- /dev/null +++ b/playground/weatherman_agent/.gitignore @@ -0,0 +1,204 @@ +__pycache__/ +.venv/ + +.env + +image/ +audio/ +video/ +dataframe/ + +static/generated +runs +chroma +Weather Director Agent_state.json +Unit Testing Agent_state.json +Devin_state.json +swarms/__pycache__ +artifacts +transcript_generator.json +venv +.DS_Store +Cargo.lock +.DS_STORE +Cargo.lock +swarms/agents/.DS_Store +artifacts_two +logs +_build +conversation.txt +t1_state.json +stderr_log.txt +t2_state.json +.vscode +.DS_STORE +# Byte-compiled / optimized / DLL files +Transcript Generator_state.json +__pycache__/ +*.py[cod] +*$py.class +.grit +swarm-worker-01_state.json +error.txt +Devin Worker 2_state.json +# C extensions +*.so +.ruff_cache + + +errors.txt + +Autonomous-Agent-XYZ1B_state.json +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py +.DS_Store +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ +.vscode/settings.json diff --git a/playground/weatherman_agent/README.md b/playground/weatherman_agent/README.md new file mode 100644 index 00000000..4a932029 --- /dev/null +++ b/playground/weatherman_agent/README.md @@ -0,0 +1,112 @@ +# Baron Weather + +## Overview +Baron Weather is a sophisticated toolset designed to enable real-time querying of weather data using the Baron API. It utilizes a swarm of autonomous agents to handle concurrent data requests, optimizing for efficiency and accuracy in weather data retrieval and analysis. + +## Features +Baron Weather includes the following key features: +- **Real-time Weather Data Access**: Instantly fetch and analyze weather conditions using the Baron API. +- **Autonomous Agents**: A swarm system for handling multiple concurrent API queries efficiently. +- **Data Visualization**: Tools for visualizing complex meteorological data for easier interpretation. 
+ + +## Prerequisites +Before you begin, ensure you have met the following requirements: +- Python 3.10 or newer +- git installed on your machine +- Install packages like swarms + +## Installation + +There are 2 methods, git cloning which allows you to modify the codebase or pip install for simple usage: + +### Pip +`pip3 install -U weather-swarm` + +### Cloning the Repository +To get started with Baron Weather, clone the repository to your local machine using: + +```bash +git clone https://github.com/baronservices/weatherman_agent.git +cd weatherman_agent +``` + +### Setting Up the Environment +Create a Python virtual environment to manage dependencies: + +```bash +python -m venv venv +source venv/bin/activate # On Windows use `venv\Scripts\activate` +``` + +### Installing Dependencies +Install the necessary Python packages via pip: + +```bash +pip install -r requirements.txt +``` + +## Usage +To start querying the Baron Weather API using the autonomous agents, run: + +```bash +python main.py +``` + +## API + +```bash +python3 api.py +``` + + +### Llama3 + +```python +from swarms import llama3Hosted + + +# Example usage +llama3 = llama3Hosted( + model="meta-llama/Meta-Llama-3-8B-Instruct", + temperature=0.8, + max_tokens=1000, + system_prompt="You are a helpful assistant.", +) + +completion_generator = llama3.run( + "create an essay on how to bake chicken" +) + +print(completion_generator) + +``` + +# Documentation +- [Llama3Hosted](docs/llama3_hosted.md) + +## Contributing +Contributions to Baron Weather are welcome and appreciated. Here's how you can contribute: + +1. Fork the Project +2. Create your Feature Branch (`git checkout -b feature/YourAmazingFeature`) +3. Commit your Changes (`git commit -m 'Add some YourAmazingFeature'`) +4. Push to the Branch (`git push origin feature/YourAmazingFeature`) +5. 
Open a Pull Request + + +## Tests +To run tests run the following: + +`pytest` + +## Contact +Project Maintainer - [Kye Gomez](mailto:kye@swarms.world) - [GitHub Profile](https://github.com/baronservices) + + +# Todo +- [ ] Add the schemas to the worker agents to output json +- [ ] Implement the parser and the function calling mapping to execute the functions +- [ ] Implement the HiearArchical Swarm and plug in and all the agents +- [ ] Then, implement the API server wrapping the hiearchical swarm +- [ ] Then, Deploy on the server 24/7 \ No newline at end of file diff --git a/playground/weatherman_agent/api.py b/playground/weatherman_agent/api.py new file mode 100644 index 00000000..f872afd4 --- /dev/null +++ b/playground/weatherman_agent/api.py @@ -0,0 +1,119 @@ +import os +import uuid +from typing import Any, Dict, List + +from dotenv import load_dotenv +from fastapi import FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel +from swarms import Agent, OpenAIChat +from swarms.utils.loguru_logger import logger + +from weather_swarm.prompts import ( + FEW_SHORT_PROMPTS, + GLOSSARY_PROMPTS, + WEATHER_AGENT_SYSTEM_PROMPT, +) +from weather_swarm.tools.tools import ( + point_query, + request_ndfd_basic, + request_ndfd_hourly, +) + +load_dotenv() + +logger.info("Starting the API server..") +app = FastAPI(debug=True) + +# Load the middleware to handle CORS +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +class ChatRequest(BaseModel): + model: str + prompt: str + max_tokens: int = 100 + temperature: float = 1.0 + + +class ChatResponse(BaseModel): + id: str + object: str + created: int + model: str + choices: List[Dict[str, Any]] + usage: Dict[str, Any] + + +@app.get("/v1/health") +async def health_check(): + return {"status": "ok"} + + +@app.get("/v1/models") +async def get_models(): + return {"models": ["WeatherMan Agent"]} + 
+ +@app.post("/v1/chat/completions", response_model=ChatResponse) +async def chat_completions(request: ChatRequest): + if request.model != "WeatherMan Agent": + raise HTTPException(status_code=400, detail="Model not found") + + # Initialize the WeatherMan Agent + agent = Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + llm=OpenAIChat( + openai_api_key=os.getenv("OPENAI_API_KEY"), + max_tokens=request.max_tokens, + temperature=request.temperature, + ), + max_loops=1, + # dynamic_temperature_enabled=True, + # verbose=True, + output_type=str, + metadata_output_type="json", + function_calling_format_type="OpenAI", + function_calling_type="json", + tools=[point_query, request_ndfd_basic, request_ndfd_hourly], + ) + + # Response from the agent + + try: + response = agent.run(request.prompt) + return { + "id": uuid.uuid4(), + "object": "text_completion", + "created": int(os.times().system), + "model": agent.agent_name, + "choices": [{"text": response}], + "usage": { + "prompt_tokens": len(request.prompt.split()), + "completion_tokens": len(response.split()), + "total_tokens": len(request.prompt.split()) + + len(response.split()), + }, + } + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +# Example of how to run the FastAPI app +def deploy_app(host: str = "0.0.0.0", port: int = 8000): + import uvicorn + + uvicorn.run(app, host=host, port=port) + + +# Run the FastAPI app +if __name__ == "__main__": + deploy_app() diff --git a/playground/weatherman_agent/datasets/examples.csv b/playground/weatherman_agent/datasets/examples.csv new file mode 100644 index 00000000..d694d7ff --- /dev/null +++ b/playground/weatherman_agent/datasets/examples.csv @@ -0,0 +1,40 @@ +prompt,goal,required inputs,api example +What is the current temperature?,allow the user to request the current temperature for their location,user's location,"request_metar_nearest(""38"", ""-96"")" 
+Describe the current weather.,have the LLM construct a narrative weather description based on current conditions,user's location,"request_metar_nearest(""38"", ""-96"")" +How much rain fell at my location?,allow the user to determine how much rain has accumulated at their location in the last 24 hours,user's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" +Is it going to be sunny tomorrow?,allow the user to determine cloud coverage for their location ,user's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Is rain expected at my location in the next 6 hours? ,allow the user to determine if precip will fall in the coming hours,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +What is the max forecasted temperature today? ,allow the user to determine how hot or cold the air temp will be,user's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Will it be windy today? ,allow the user to determine the max wind speed for that day,user's location,"point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)" +,,, +How much rain fell at my location on date/time?,,"user's location, date/time", +What dates did hail fall at my location during x time range? ,allow the user to request a list of dates at which hail fell at their location,"user's location, date range", +Is it good weather to spray fertilizer? ,,, +How will the weather today impact solar panel performance? ,,, +Will my soccer game get rained out this evening? ,"determine if rain will impact my location ""this evening""","user's location, current date","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is it safe to go hiking today based on the weather? ,"check for high wind or rain forecast, perhaps extreme heat and cold","hiking location, current date", +What is the liklihood of frost tonight? 
,are forecast conditions right for frost,"location, date", +What time will be the hottest part of the day tomorrow? ,determine highest forecast heat index tomorrow,"location, tomorrow's date", +When is it forecasted to rain again at my house? ,"use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. If not, swap to GFS for days 4-14",location, +How will the weather impact my flight today? ,check against conditions commonly associated with flight delays,location/time of departure at airport, +Are there any flood warnings in my area? ,check against current watch/warning map,location, +How will the weather affect road conditions and traffic safety tomorrow morning?,"check forecasted road conditions, perhaps check for heavy precip rate, high accums, snow depth",location/route, +When was the last time it rained at my location? ,"use historical rainfall, weather inspector?","location, date range", +,,, +,,, +What's the highest temperature in United States right now?,determine the highest current temperature in the US,search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53 + +discard all METARs that do not begin with the letter K +" +What's the lowest temperature in United States right now?,determine the lowest current temperature in the US,search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53 + +discard all METARs that do not begin with the letter K +" +What's the highest temperature in the world right now?,determine the highest current temperature in the world,search all METARs, +What's the lowest temperature in the world right now?,determine the lowest current temperature in the world,search all 
METARs,https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI= +,,, +,,, +,,, +,,, +,,, +Weather inspector tie in???,,, \ No newline at end of file diff --git a/playground/weatherman_agent/datasets/rain_weather_prompts.csv b/playground/weatherman_agent/datasets/rain_weather_prompts.csv new file mode 100644 index 00000000..6f9a6276 --- /dev/null +++ b/playground/weatherman_agent/datasets/rain_weather_prompts.csv @@ -0,0 +1,26 @@ +prompt,goal,required inputs,api example +How much rain fell at my location?,allow the user to determine how much rain has accumulated at their location in the last 24 hours,user's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" +Is rain expected at my location in the next 6 hours?,"allow the user to determine if precip will fall in the coming hours, forecast query",user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +How much rain fell at my location on date/time?,historical query,"user's location, date/time",https://api.velocityweather.com/v1/cLRlLroVhajP/point/north-american-radar/Mask1-Mercator/2024-05-08T21%3A14%3A43Z.json?lat=35.505400093441324&lon=-87.60498046875&ts=1717294800&sig=_mCs5_XfZKQon55AzSGPI7dtoHY= +Will my soccer game get rained out this evening?,"determine if rain will impact my location ""this evening"", forecast query","user's location, current date","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +When is it forecasted to rain again at my house?,"use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. 
If not, swap to GFS for days 4-14",user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +When was the last time it rained at my location?,"use historical rainfall, weather inspector?","location, date range", +Is there any chance of rain during my commute today?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Should I bring an umbrella for my walk this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain for the outdoor concert tonight?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain during my barbecue this weekend?,forecast query,location of bbq,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is there a storm expected in my area today?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain on my drive to work tomorrow morning?,forecast query,user's location + work location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Are there any rain showers predicted for this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Should I expect rain during my jog this evening?,forecast query,user's location.....where will they jog? will the LLM prompt?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +How likely is it to rain during my picnic at the park?,forecast query,user's location .... 
will the LLM prompt for the picnic location?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is rain expected when I plan to leave for the airport?,forecast query,user's location....will the LLM prompt for the location they'll depart from?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will the weather be dry for my cycling trip today?,forecast query,"location of cycling trip, starting point","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is rain in the forecast for my beach outing tomorrow?,forecast query,location of beach,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain during my son's baseball game tonight?,forecast query,location of baseball game,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Should I prepare for rain on my camping trip this weekend?,forecast query,location of campsite....will the LLM prompt for the campsite location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +What’s the rain forecast for my neighborhood today?,forecast query,neighbourhood location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is there any rainfall expected while I'm gardening this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +How heavy is the rain expected to be tonight?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Are there any rainstorms predicted during my road trip?,forecast query,can the LLM prompt for location/route of the road trip? should we state we don't support multi-location prompts? 
can we pull this off?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will there be rain showers in my area over the next few days?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" diff --git a/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv b/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv new file mode 100644 index 00000000..4dfef6c7 --- /dev/null +++ b/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv @@ -0,0 +1,26 @@ +prompt,goal,required inputs,api example +What is the current temperature?,Allow the user to request the current temperature for their location,User's location,"request_metar_nearest(""38"", ""-96"")" +Describe the current weather.,Have the LLM construct a narrative weather description based on current conditions,User's location,"request_metar_nearest(""38"", ""-96"")" +How much rain fell at my location?,Allow the user to determine how much rain has accumulated at their location in the last 24 hours,User's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" +Is it going to be sunny tomorrow?,Allow the user to determine cloud coverage for their location,User's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Is rain expected at my location in the next 6 hours?,Allow the user to determine if precipitation will fall in the coming hours,User's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +What is the max forecasted temperature today?,Allow the user to determine how hot or cold the air temp will be,User's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Will it be windy today?,Allow the user to determine the max wind speed for that day,User's location,"point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)" +How much rain fell at my location on 
date/time?,,,User's location, date/time +What dates did hail fall at my location during x time range?,Allow the user to request a list of dates at which hail fell at their location,User's location, date range +Is it good weather to spray fertilizer?,,,, +How will the weather today impact solar panel performance?,,,, +Will my soccer game get rained out this evening?,Determine if rain will impact my location "this evening",User's location, current date,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is it safe to go hiking today based on the weather?,Check for high wind or rain forecast, perhaps extreme heat and cold,Hiking location, current date +What is the likelihood of frost tonight?,Are forecast conditions right for frost,Location, date +What time will be the hottest part of the day tomorrow?,Determine highest forecast heat index tomorrow,Location, tomorrow's date +When is it forecasted to rain again at my house?,Use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. 
If not, swap to GFS for days 4-14,Location, +How will the weather impact my flight today?,Check against conditions commonly associated with flight delays,Location/time of departure at airport, +Are there any flood warnings in my area?,Check against current watch/warning map,Location, +How will the weather affect road conditions and traffic safety tomorrow morning?,Check forecasted road conditions, perhaps check for heavy precip rate, high accums, snow depth,Location/route, +When was the last time it rained at my location?,Use historical rainfall, weather inspector?,Location, date range +What's the highest temperature in United States right now?,Determine the highest current temperature in the US,Search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53" +What's the lowest temperature in United States right now?,Determine the lowest current temperature in the US,Search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53" +What's the highest temperature in the world right now?,Determine the highest current temperature in the world,Search all METARs,https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI= +What's the lowest temperature in the world right now?,Determine the lowest current temperature in the world,Search all METARs,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI=" +Weather inspector tie in???,,,, diff --git a/playground/weatherman_agent/docs/llama3_hosted.md b/playground/weatherman_agent/docs/llama3_hosted.md new file mode 100644 index 00000000..9fd770f4 --- /dev/null +++ b/playground/weatherman_agent/docs/llama3_hosted.md @@ -0,0 +1,112 @@ +# 
llama3Hosted Documentation + +## Overview + +The `llama3Hosted` class is a high-level interface for interacting with a hosted version of the Llama3 model. This class is designed to simplify the process of generating responses from the Llama3 model by providing an easy-to-use interface for sending requests and receiving responses. The Llama3 model is a state-of-the-art language model developed by Meta, known for its ability to generate human-like text based on the input it receives. + +### Key Features + +- **Model Customization**: Allows the user to specify which version of the Llama3 model to use. +- **Temperature Control**: Adjusts the randomness of the generated responses. +- **Token Limitation**: Sets a limit on the maximum number of tokens in the generated response. +- **System Prompt**: Defines the initial context for the conversation, guiding the model's responses. + +## Purpose + +The `llama3Hosted` class is designed to provide developers with a straightforward way to utilize the capabilities of the Llama3 model without dealing with the complexities of model hosting and API management. It is particularly useful for applications that require natural language understanding and generation, such as chatbots, virtual assistants, and content generation tools. + +## Class Definition + +### llama3Hosted Parameters + +| Parameter | Type | Default | Description | +|----------------|--------|-----------------------------------------|--------------------------------------------------------------| +| `model` | `str` | `"meta-llama/Meta-Llama-3-8B-Instruct"` | The name or path of the Llama3 model to use. | +| `temperature` | `float`| `0.8` | The temperature parameter for generating responses. | +| `max_tokens` | `int` | `4000` | The maximum number of tokens in the generated response. | +| `system_prompt`| `str` | `"You are a helpful assistant."` | The system prompt to use for generating responses. | +| `*args` | | | Variable length argument list. 
| +| `**kwargs` | | | Arbitrary keyword arguments. | + +### Attributes + +| Attribute | Type | Description | +|----------------|--------|--------------------------------------------------------------| +| `model` | `str` | The name or path of the Llama3 model. | +| `temperature` | `float`| The temperature parameter for generating responses. | +| `max_tokens` | `int` | The maximum number of tokens in the generated response. | +| `system_prompt`| `str` | The system prompt for generating responses. | + +## Method: run + +### Parameters + +| Parameter | Type | Description | +|-----------|--------|-----------------------------------| +| `task` | `str` | The user's task or input. | +| `*args` | | Variable length argument list. | +| `**kwargs`| | Arbitrary keyword arguments. | + +### Returns + +| Type | Description | +|------|--------------------------------------------| +| `str`| The generated response from the Llama3 model.| + +### Usage Examples +First install weather_swarm with: + +`$ pip install -U weather-swarm` + + +#### Example 1: Basic Usage + +```python +from weather_swarmn import llama3Hosted + +llama = llama3Hosted() +response = llama.run("Tell me a joke.") +print(response) +``` + +#### Example 2: Custom Model and Parameters + +```python +import requests +import json +from weather_swarmn import llama3Hosted + + +llama = llama3Hosted( + model="custom-llama-model", + temperature=0.5, + max_tokens=2000, + system_prompt="You are a witty assistant." +) +response = llama.run("What's the weather like today?") +print(response) +``` + +#### Example 3: Using Additional Arguments + +```python +from weather_swarmn import llama3Hosted + +llama = llama3Hosted() +response = llama.run("Write a short story.", custom_stop_tokens=[128002, 128003]) +print(response) +``` + +## Additional Information and Tips + +- **Temperature Parameter**: The temperature parameter controls the randomness of the model's output. 
Lower values (close to 0) make the output more deterministic, while higher values (up to 1) make it more random. +- **System Prompt**: Crafting an effective system prompt can significantly impact the quality and relevance of the model's responses. Ensure the prompt aligns well with the intended use case. +- **Error Handling**: Always include error handling when making API requests to ensure your application can gracefully handle any issues that arise. + +## References and Resources + +- [Llama3 Model Documentation](https://github.com/facebookresearch/llama) +- [Requests Library Documentation](https://docs.python-requests.org/en/latest/) +- [JSON Library Documentation](https://docs.python.org/3/library/json.html) + +This documentation provides a comprehensive overview of the `llama3Hosted` class, its parameters, attributes, methods, and usage examples. By following this guide, developers can effectively integrate and utilize the Llama3 model in their applications. \ No newline at end of file diff --git a/playground/weatherman_agent/docs/weather_agent.md b/playground/weatherman_agent/docs/weather_agent.md new file mode 100644 index 00000000..58bf7ad0 --- /dev/null +++ b/playground/weatherman_agent/docs/weather_agent.md @@ -0,0 +1,113 @@ +## Weather Agent API Documentation + +### Overview +The Weather Agent API provides endpoints to interact with a weather prediction model, "WeatherMan Agent". This API allows users to get weather-related information through chat completions using the OpenAI GPT model with specific prompts and tools. + +### Base URL +``` +http://localhost:8000 +``` + +### Endpoints + +#### Health Check + +##### `GET /v1/health` +Checks the health status of the API. + +**Response:** +- `200 OK`: Returns a JSON object indicating the status of the API. + ```json + { + "status": "ok" + } + ``` + +#### Get Models + +##### `GET /v1/models` +Retrieves the list of available models. + +**Response:** +- `200 OK`: Returns a JSON object with the list of models. 
+ ```json + { + "models": ["WeatherMan Agent"] + } + ``` + +#### Chat Completions + +##### `POST /v1/chat/completions` +Generates weather-related responses based on the provided prompt using the "WeatherMan Agent" model. + +**Request Body:** +- `model` (string): The name of the model to use. Must be "WeatherMan Agent". +- `prompt` (string): The input prompt for the chat completion. +- `max_tokens` (integer, optional): The maximum number of tokens to generate. Default is 100. +- `temperature` (float, optional): The sampling temperature for the model. Default is 1.0. + +**Example Request:** +```json +{ + "model": "WeatherMan Agent", + "prompt": "What will the weather be like tomorrow in New York?", + "max_tokens": 100, + "temperature": 1.0 +} +``` + +**Response:** +- `200 OK`: Returns a JSON object with the completion result. + ```json + { + "id": "unique-id", + "object": "text_completion", + "created": 1234567890, + "model": "WeatherMan Agent", + "choices": [ + { + "text": "The weather tomorrow in New York will be..." + } + ], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 15, + "total_tokens": 25 + } + } + ``` +- `400 Bad Request`: If the model specified is not "WeatherMan Agent". + ```json + { + "detail": "Model not found" + } + ``` +- `500 Internal Server Error`: If there is an error processing the request. + ```json + { + "detail": "Error message" + } + ``` + +### Models +The API supports the following model: +- **WeatherMan Agent**: A specialized agent for providing weather-related information based on the prompt. + +### Usage + +1. **Health Check:** Verify that the API is running by sending a GET request to `/v1/health`. +2. **Get Models:** Retrieve the list of available models by sending a GET request to `/v1/models`. +3. **Chat Completions:** Generate a weather-related response by sending a POST request to `/v1/chat/completions` with the required parameters. 
+ +### Error Handling +The API returns appropriate HTTP status codes and error messages for different error scenarios: +- `400 Bad Request` for invalid requests. +- `500 Internal Server Error` for unexpected errors during processing. + +### CORS Configuration +The API allows cross-origin requests from any origin, supporting all methods and headers. + +--- + +For further assistance or issues, please contact the API support team. \ No newline at end of file diff --git a/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py b/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py new file mode 100644 index 00000000..b2ede198 --- /dev/null +++ b/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py @@ -0,0 +1,30 @@ +from weather_swarm.tools.tools import request_metar_nearest +from swarms import tool + + +@tool( + name="RequestMetarNearest", + description=( + "Requests the nearest METAR (Meteorological Aerodrome Report)" + " data based on the given latitude and longitude." + ), + return_string=False, + return_dict=False, +) +def request_metar_nearest_new(lat: float, lon: float): + """ + Requests the nearest METAR (Meteorological Aerodrome Report) data based on the given latitude and longitude. + + Args: + lat (float): The latitude of the location. + lon (float): The longitude of the location. + + Returns: + The METAR data for the nearest location. 
+ """ + return request_metar_nearest(lat, lon) + + +out = request_metar_nearest_new(37.7749, -122.4194) +print(out) +print(type(out)) diff --git a/playground/weatherman_agent/examples/llama_3_hosted_swarms.py b/playground/weatherman_agent/examples/llama_3_hosted_swarms.py new file mode 100644 index 00000000..78292685 --- /dev/null +++ b/playground/weatherman_agent/examples/llama_3_hosted_swarms.py @@ -0,0 +1,19 @@ +from swarms import llama3Hosted + + +# Example usage +llama3 = llama3Hosted( + model="meta-llama/Meta-Llama-3-8B-Instruct", + temperature=0.8, + max_tokens=1000, + system_prompt=( + "You're a weather agent for Baron Weather, you specialize in" + " weather analysis" + ), +) + +completion_generator = llama3.run( + "What are the best weather conditions to lay concrete", +) + +print(completion_generator) diff --git a/playground/weatherman_agent/examples/llama_agent.py b/playground/weatherman_agent/examples/llama_agent.py new file mode 100644 index 00000000..6debdd38 --- /dev/null +++ b/playground/weatherman_agent/examples/llama_agent.py @@ -0,0 +1,34 @@ +from swarms import Agent +from swarms import llama3Hosted +from weather_swarm.prompts import GLOSSARY_PROMPTS +from weather_swarm.prompts import ( + FEW_SHORT_PROMPTS, + WEATHER_AGENT_SYSTEM_PROMPT, +) + + +# Purpose = To generate weather information for the user and send API requests to the Baron Weather API +agent = Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + # sop=list_tool_schemas_json, + llm=llama3Hosted( + max_tokens=2000, + temperature=0.1, + ), + max_loops="auto", + autosave=True, + dashboard=False, + streaming_on=True, + interactive=True, +) + +# Run the agent to generate the person's information +generated_data = agent.run( + "Based on the current humidity in Huntsville, how frizzy will my" + " hair get?" 
+) + +# Print the generated data +# print(f"Generated data: {generated_data}") diff --git a/playground/weatherman_agent/examples/tool_schemas.py b/playground/weatherman_agent/examples/tool_schemas.py new file mode 100644 index 00000000..3cb561c2 --- /dev/null +++ b/playground/weatherman_agent/examples/tool_schemas.py @@ -0,0 +1,35 @@ +from swarms import get_openai_function_schema_from_func + +from weather_swarm.tools.tools import ( + request_metar_nearest, + point_query, + request_ndfd_basic, + # point_query_region, + request_ndfd_hourly, +) + + +def get_schemas_for_funcs(funcs): + schemas = [] + for func in funcs: + name = str(func.__name__) + description = str(func.__doc__) + schema = get_openai_function_schema_from_func( + func, name=name, description=description + ) + schemas.append(str(schema)) + merged_schemas = "\n".join(schemas) + return merged_schemas + + +funcs = [ + request_metar_nearest, + point_query, + request_ndfd_basic, + # point_query_region, + request_ndfd_hourly, +] + +schemas = get_schemas_for_funcs(funcs) +print(schemas) +print(type(schemas)) diff --git a/playground/weatherman_agent/pyproject.toml b/playground/weatherman_agent/pyproject.toml new file mode 100644 index 00000000..5d38a19a --- /dev/null +++ b/playground/weatherman_agent/pyproject.toml @@ -0,0 +1,55 @@ +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "weather-swarm" +version = "0.0.6" +description = "Weather Swarm - Pytorch" +license = "MIT" +authors = ["Kye Gomez "] +homepage = "https://github.com/baronservices/weatherman_agent" +documentation = "https://github.com/baronservices/weatherman_agent" # Add this if you have documentation. 
+readme = "README.md" # Assuming you have a README.md +repository = "https://github.com/baronservices/weatherman_agent" +keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3.9" +] + +[tool.poetry.dependencies] +python = "^3.10" +swarms = "*" +pydantic = "2.7.1" + + + +[tool.poetry.group.lint.dependencies] +ruff = "^0.1.6" +types-toml = "^0.10.8.1" +types-redis = "^4.3.21.6" +types-pytz = "^2023.3.0.0" +black = "^23.1.0" +types-chardet = "^5.0.4.6" +mypy-protobuf = "^3.0.0" + + +[tool.autopep8] +max_line_length = 80 +ignore = "E501,W6" # or ["E501", "W6"] +in-place = true +recursive = true +aggressive = 3 + + +[tool.ruff] +line-length = 70 + +[tool.black] +line-length = 70 +target-version = ['py38'] +preview = true diff --git a/playground/weatherman_agent/requirements.txt b/playground/weatherman_agent/requirements.txt new file mode 100644 index 00000000..a26b8b84 --- /dev/null +++ b/playground/weatherman_agent/requirements.txt @@ -0,0 +1,18 @@ +swarms +pydantic==2.7.1 +base64==1.0.0 +datetime==4.3 +hashlib==20081119 +hmac==20151222 +shutil==1.7.0 +urllib3==1.26.7 +json5==0.9.6 +codecs==1.0.0 +fastapi +pytest +hydra +loguru +requests +opencv-python +beartype +termcolor diff --git a/playground/weatherman_agent/scripts/Dockerfile b/playground/weatherman_agent/scripts/Dockerfile new file mode 100644 index 00000000..7213ac11 --- /dev/null +++ b/playground/weatherman_agent/scripts/Dockerfile @@ -0,0 +1,28 @@ +# Use an official Python runtime as a parent image +FROM python:3.10-slim-buster + +# Set environment varibles +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 + +# Set work directory +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y 
--no-install-recommends \ + gcc \ + default-libmysqlclient-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +COPY requirements.txt /app/ +RUN pip install --no-cache-dir -r requirements.txt + +# Copy project +COPY . /app/ + +# Expose port +EXPOSE 5000 + +# Run the application: +CMD ["gunicorn", "-w", "4", "-k", "gevent", "api:app"] \ No newline at end of file diff --git a/playground/weatherman_agent/scripts/setup.sh b/playground/weatherman_agent/scripts/setup.sh new file mode 100644 index 00000000..e69de29b diff --git a/playground/weatherman_agent/tests/test_baron_tools.py b/playground/weatherman_agent/tests/test_baron_tools.py new file mode 100644 index 00000000..4b21856a --- /dev/null +++ b/playground/weatherman_agent/tests/test_baron_tools.py @@ -0,0 +1,56 @@ +from unittest.mock import patch +from weather_swarm.tools.tools import ( + request_metar_nearest, + point_query, + request_ndfd_basic, +) + + +class TestWeatherFunctions: + @patch("your_module.request_metar_nearest") + def test_request_metar_nearest(self, mock_request_metar_nearest): + mock_request_metar_nearest.return_value = "expected_value" + result = request_metar_nearest("38", "-96") + assert result == "expected_value" + + @patch("your_module.point_query") + def test_point_query_precip_totalaccum(self, mock_point_query): + mock_point_query.return_value = "expected_value" + result = point_query( + "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4 + ) + assert result == "expected_value" + + @patch("your_module.point_query") + def test_point_query_baron_hires_maxreflectivity( + self, mock_point_query + ): + mock_point_query.return_value = "expected_value" + result = point_query( + "baron-hires-maxreflectivity-dbz-all", + "Mask1-Mercator", + -86.6, + 34.4, + ) + assert result == "expected_value" + + @patch("your_module.point_query") + def test_point_query_baron_hires_windspeed( + self, mock_point_query + ): + mock_point_query.return_value = "expected_value" + result = 
point_query( + "baron-hires-windspeed-mph-10meter", + "Standard-Mercator", + -86.6, + 34.4, + ) + assert result == "expected_value" + + @patch("your_module.request_ndfd_basic") + def test_request_ndfd_basic(self, mock_request_ndfd_basic): + mock_request_ndfd_basic.return_value = "expected_value" + result = request_ndfd_basic( + 34.730301, -86.586098, "forecast_time" + ) + assert result == "expected_value" diff --git a/playground/weatherman_agent/tests/test_llama3.py b/playground/weatherman_agent/tests/test_llama3.py new file mode 100644 index 00000000..2e98c03d --- /dev/null +++ b/playground/weatherman_agent/tests/test_llama3.py @@ -0,0 +1,41 @@ +from unittest.mock import Mock, patch +from swarms import llama3Hosted + + +class TestLlama3Hosted: + def setup_method(self): + self.llama = llama3Hosted() + + def test_init(self): + assert ( + self.llama.model == "meta-llama/Meta-Llama-3-8B-Instruct" + ) + assert self.llama.temperature == 0.8 + assert self.llama.max_tokens == 4000 + assert ( + self.llama.system_prompt == "You are a helpful assistant." 
+ ) + + @patch("requests.request") + def test_run(self, mock_request): + mock_response = Mock() + expected_result = "Test response" + mock_response.json.return_value = { + "choices": [{"message": {"content": expected_result}}] + } + mock_request.return_value = mock_response + + result = self.llama.run("Test task") + assert result == expected_result + mock_request.assert_called_once_with( + "POST", + "http://34.204.8.31:30001/v1/chat/completions", + headers={"Content-Type": "application/json"}, + data=( + '{"model": "meta-llama/Meta-Llama-3-8B-Instruct",' + ' "messages": [{"role": "system", "content": "You are' + ' a helpful assistant."}, {"role": "user", "content":' + ' "Test task"}], "stop_token_ids": [128009, 128001],' + ' "temperature": 0.8, "max_tokens": 4000}' + ), + ) diff --git a/playground/weatherman_agent/tests/tests_weather_agent.py b/playground/weatherman_agent/tests/tests_weather_agent.py new file mode 100644 index 00000000..891da6a6 --- /dev/null +++ b/playground/weatherman_agent/tests/tests_weather_agent.py @@ -0,0 +1,161 @@ +import os +import pytest +from dotenv import load_dotenv +from weather_swarm import Agent +from weather_swarm.prompts import ( + WEATHER_AGENT_SYSTEM_PROMPT, + GLOSSARY_PROMPTS, + FEW_SHORT_PROMPTS, +) +from weather_swarm.tools.tools import ( + point_query, + request_ndfd_basic, + request_ndfd_hourly, +) +from swarms import OpenAIChat +from unittest.mock import Mock, patch + +# Load environment variables for tests +load_dotenv() + + +# Fixtures +@pytest.fixture +def weather_agent(): + return Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + llm=OpenAIChat(), + max_loops=1, + dynamic_temperature_enabled=True, + verbose=True, + output_type=str, + tools=[point_query, request_ndfd_basic, request_ndfd_hourly], + docs_folder="datasets", + metadata="json", + function_calling_format_type="OpenAI", + function_calling_type="json", + ) + + +# Test 
Environment Loading +def test_load_dotenv(): + assert ( + "API_KEY" in os.environ + ), "API_KEY not found in environment variables" + assert ( + "API_SECRET" in os.environ + ), "API_SECRET not found in environment variables" + + +# Test Agent Initialization +def test_agent_initialization(weather_agent): + assert weather_agent.agent_name == "WeatherMan Agent" + assert weather_agent.system_prompt == WEATHER_AGENT_SYSTEM_PROMPT + assert weather_agent.llm is not None + assert len(weather_agent.tools) == 3 + assert weather_agent.max_loops == 1 + assert weather_agent.dynamic_temperature_enabled is True + assert weather_agent.verbose is True + assert weather_agent.output_type == str + assert weather_agent.docs_folder == "datasets" + assert weather_agent.metadata == "json" + assert weather_agent.function_calling_format_type == "OpenAI" + assert weather_agent.function_calling_type == "json" + + +# Parameterized Testing for Agent Tools +@pytest.mark.parametrize( + "tool", [point_query, request_ndfd_basic, request_ndfd_hourly] +) +def test_agent_tools(weather_agent, tool): + assert tool in weather_agent.tools + + +# Mocking the Agent Run Method +@patch.object( + Agent, + "run", + return_value="No, there are no chances of rain today in Huntsville.", +) +def test_agent_run(mock_run, weather_agent): + response = weather_agent.run( + "Are there any chances of rain today in Huntsville?" + ) + assert ( + response + == "No, there are no chances of rain today in Huntsville." + ) + mock_run.assert_called_once_with( + "Are there any chances of rain today in Huntsville?" 
+ ) + + +# Testing Agent's Response Handling +def test_agent_response_handling(weather_agent): + weather_agent.llm = Mock() + weather_agent.llm.return_value = "Mocked Response" + response = weather_agent.run("What's the weather like?") + assert response == "Mocked Response" + + +# Test for Exception Handling in Agent Run +def test_agent_run_exception_handling(weather_agent): + weather_agent.llm = Mock( + side_effect=Exception("Mocked Exception") + ) + with pytest.raises(Exception, match="Mocked Exception"): + weather_agent.run("Will it rain tomorrow?") + + +# Testing Agent Initialization with Missing Parameters +def test_agent_initialization_missing_params(): + with pytest.raises(TypeError): + Agent(agent_name="WeatherMan Agent") + + +# Mocking Environment Variables +@patch.dict( + os.environ, + {"API_KEY": "mock_api_key", "API_SECRET": "mock_api_secret"}, +) +def test_environment_variables(): + load_dotenv() + assert os.getenv("API_KEY") == "mock_api_key" + assert os.getenv("API_SECRET") == "mock_api_secret" + + +# Testing Tools Functionality (Example: point_query) +def test_point_query(): + response = point_query("test_latitude", "test_longitude") + assert ( + response is not None + ) # Replace with more specific assertions based on actual function behavior + + +# Testing Tools Functionality (Example: request_ndfd_basic) +def test_request_ndfd_basic(): + response = request_ndfd_basic("test_latitude", "test_longitude") + assert ( + response is not None + ) # Replace with more specific assertions based on actual function behavior + + +# Testing Tools Functionality (Example: request_ndfd_hourly) +def test_request_ndfd_hourly(): + response = request_ndfd_hourly("test_latitude", "test_longitude") + assert ( + response is not None + ) # Replace with more specific assertions based on actual function behavior + + +# Grouping and Marking Tests +@pytest.mark.slow +def test_slow_functionality(weather_agent): + response = weather_agent.run("Long running query") + assert 
response is not None # Example placeholder + + +# Test Coverage Report +# Run the following command to generate a coverage report: `pytest --cov=weather_swarm` diff --git a/playground/weatherman_agent/todo/director_agent.py b/playground/weatherman_agent/todo/director_agent.py new file mode 100644 index 00000000..faa5f1fa --- /dev/null +++ b/playground/weatherman_agent/todo/director_agent.py @@ -0,0 +1,279 @@ +from swarms import Agent +from swarms import llama3Hosted +from weather_swarm.prompts import GLOSSARY_PROMPTS +from pydantic import BaseModel, Field + + +# Define the schema for the HierarchicalSwarmRequest +# class HierarchicalSwarmRequest(BaseModel): +# agents: Dict[str, Any] = Field( +# ..., +# description=( +# "The name of the agents and their respective tasks to be" +# " executed hierarchically." +# ), +# examples={ +# "Weather Director Agent": { +# "task": ( +# "Are there any chances of rain today in" +# " Huntsville?" +# ) +# } +# }, +# ) + + +class HierarchicalSwarmRequest(BaseModel): + task: str = Field( + ..., + description="The user's query.", + examples={ + "What is the current temperature at my location?": { + "task": "What is the current temperature at my location?" + } + }, + ) + agent_name: str = Field( + ..., + description="The name of the specialized agent.", + examples={ + "Current Temperature Retrieval Agent": "Current Temperature Retrieval Agent" + }, + ) + + +# Define the schema for the HierarchicalSwarmResponse +def DIRECTOR_SYSTEM_PROMPT() -> str: + return """**Prompt:** + As a director master agent, your task is to communicate with the user, understand their weather-related queries, and delegate the appropriate tasks to specialized worker agents. Each worker agent is specialized in retrieving a specific type of weather data. Your role involves selecting the correct agent or a list of agents, giving them the necessary tasks, and compiling their responses to provide a comprehensive answer to the user. 
+ + **Goal:** + Efficiently manage and delegate tasks to specialized worker agents to gather the necessary weather data and provide a detailed, accurate response to the user. + + **Process:** + 1. **Receive User Query:** + - Understand the user's question or request regarding weather data. + + 2. **Identify Required Data:** + - Determine the type(s) of weather data needed to answer the user's query. + + 3. **Select Appropriate Agents:** + - Choose the specialized agent(s) capable of retrieving the required data. + + 4. **Delegate Tasks:** + - Assign the relevant task to the selected agent(s) using the appropriate inputs. + + 5. **Compile Responses:** + - Gather and compile the data returned by the worker agents into a cohesive response. + + 6. **Respond to User:** + - Provide a detailed and accurate answer to the user based on the compiled data. + + **Worker Agents and Their Specializations:** + 1. **Current Temperature Retrieval Agent** + - Task: Provide the current temperature based on the user's location. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_metar_nearest("38", "-96")` + + 2. **Current Weather Description Agent** + - Task: Construct a narrative weather description based on current conditions. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_metar_nearest("38", "-96")` + + 3. **Rainfall Accumulation Agent** + - Task: Provide the accumulated rainfall at the user's location for the last 24 hours. + - Required Inputs: User's location (latitude and longitude). + - API Example: `point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)` + + 4. **Cloud Coverage Forecast Agent** + - Task: Provide the cloud coverage forecast for the user's location for the next day. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)` + + 5. 
**Precipitation Forecast Agent** + - Task: Provide the precipitation forecast for the user's location for the next 6 hours. + - Required Inputs: User's location (latitude and longitude). + - API Example: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)` + + 6. **Maximum Temperature Forecast Agent** + - Task: Provide the maximum forecasted temperature for the user's location for today. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)` + + 7. **Wind Speed Forecast Agent** + - Task: Provide the maximum wind speed forecast for the user's location for today. + - Required Inputs: User's location (latitude and longitude). + - API Example: `point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)` + + **Example Workflow:** + 1. **User Query:** + - "What is the current temperature and will it rain in the next 6 hours at my location?" + + 2. **Identify Required Data:** + - Current temperature and precipitation forecast. + + 3. **Select Appropriate Agents:** + - Current Temperature Retrieval Agent + - Precipitation Forecast Agent + + 4. **Delegate Tasks:** + - Current Temperature Retrieval Agent: `request_metar_nearest("38", "-96")` + - Precipitation Forecast Agent: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)` + + 5. **Compile Responses:** + - Gather responses from both agents. + + 6. **Respond to User:** + - "The current temperature at your location is X degrees. There is/is not expected to be precipitation in the next 6 hours." + + By following this structured approach, you can efficiently manage user queries and provide accurate, detailed weather information. 
+ """ + + +# Define the schema for the HierarchicalSwarmResponse +def DIRECTOR_SCHEMA() -> str: + return """ + + { + "type": "object", + "properties": { + "task_id": { + "type": "string", + "description": "Unique identifier for the task" + }, + "user_query": { + "type": "string", + "description": "The query provided by the user" + }, + "agents": { + "type": "array", + "description": "List of agents to handle the query", + "items": { + "type": "object", + "properties": { + "agent_name": { + "type": "string", + "description": "Name of the specialized agent" + }, + "task": { + "type": "string", + "description": "Task description for the agent" + }, + }, + "required": ["agent_name", "task"] + } + } + }, + "required": ["task_id", "user_query", "agents"] + } + + """ + + +def DIRECTOR_AGENT_CALLING_FEW_SHOT() -> str: + return """ + + { + "task_id": "1", + "user_query": "What is the current temperature at my location?", + "agents": [ + { + "agent_name": "Current Temperature Retrieval Agent", + "task": "Provide the current temperature based on the user's location.", + } + ] + } + + + ########## "What is the current temperature and will it rain in the next 6 hours at my location? 
######### + + { + "task_id": "2", + "user_query": "What is the current temperature and will it rain in the next 6 hours at my location?", + "agents": [ + { + "agent_name": "Current Temperature Retrieval Agent", + "task": "Provide the current temperature based on the user's location.", + }, + { + "agent_name": "Precipitation Forecast Agent", + "task": "Provide the precipitation forecast for the user's location for the next 6 hours.", + } + ] + } + + ########### END OF EXAMPLES ########### + + ############# Example 3: Maximum Temperature and Wind Speed Forecast ######### + { + "task_id": "3", + "user_query": "What is the maximum temperature and wind speed forecast for today at my location?", + "agents": [ + { + "agent_name": "Maximum Temperature Forecast Agent", + "task": "Provide the maximum forecasted temperature for the user's location for today.", + }, + { + "agent_name": "Wind Speed Forecast Agent", + "task": "Provide the maximum wind speed forecast for the user's location for today.", + } + ] + } + + + ############ End of Example 3 ############ + + ############ Example 4: Rainfall Accumulation and Cloud Coverage Forecast ######### + { + "task_id": "4", + "user_query": "How much rain fell at my location in the last 24 hours and what is the cloud coverage forecast for tomorrow?", + "agents": [ + { + "agent_name": "Rainfall Accumulation Agent", + "task": "Provide the accumulated rainfall at the user's location for the last 24 hours.", + }, + { + "agent_name": "Cloud Coverage Forecast Agent", + "task": "Provide the cloud coverage forecast for the user's location for the next day.", + } + ] + } + + ############ End of Example 4 ############ + + """ + + +# [C]reate a new agent +agent = Agent( + agent_name="Weather Director Agent", + system_prompt=DIRECTOR_SYSTEM_PROMPT(), + sop_list=[ + GLOSSARY_PROMPTS, + DIRECTOR_SCHEMA(), + DIRECTOR_AGENT_CALLING_FEW_SHOT(), + ], + # sop=list_tool_schemas_json, + llm=llama3Hosted(max_tokens=1000), + max_loops=1, + autosave=True, + 
dashboard=False, + streaming_on=True, + # interactive=True, + verbose=True, + # Set the output type to the tool schema which is a BaseModel + output_type=str, # or dict, or str + metadata_output_type="json", + # List of schemas that the agent can handle + function_calling_format_type="OpenAI", + function_calling_type="json", # or soon yaml + # return_history=True, +) + +# Run the agent to generate the person's information +generated_data = agent.run( + "Are there any chances of rain today in Huntsville?" +) + +# Print the generated data +print(f"Generated data: {generated_data}") diff --git a/playground/weatherman_agent/todo/worker_agents.py b/playground/weatherman_agent/todo/worker_agents.py new file mode 100644 index 00000000..ed8d090f --- /dev/null +++ b/playground/weatherman_agent/todo/worker_agents.py @@ -0,0 +1,269 @@ +from swarms import Agent +from swarms import llama3Hosted +from pydantic import BaseModel, Field +from weather_swarm.tools.tools import ( + request_metar_nearest, + point_query, + request_ndfd_basic, + point_query_region, + request_ndfd_hourly, +) + + +class WeatherRequest(BaseModel): + """ + A class to represent the weather request. + + Attributes + ---------- + query : str + The user's query. + """ + + task: str = Field(..., title="The user's query") + tool: str = Field(None, title="The tool to execute") + + +def current_temperature_retrieval_agent(): + return """ + ### Current Temperature Retrieval Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the current temperature based on the user's location. Ensure accuracy and up-to-date information. + + **Goal:** + Allow the user to request the current temperature for their location. + + **Required Inputs:** + User's location (latitude and longitude). 
+ + **API Example:** + request_metar_nearest("38", "-96") + """ + + +def current_weather_description_agent(): + return """ + ### Current Weather Description Agent + + **Prompt:** + As a specialized weather data agent, your task is to construct a narrative weather description based on the current conditions at the user's location. + + **Goal:** + Have the LLM construct a narrative weather description based on current conditions. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_metar_nearest("38", "-96") + """ + + +def rainfall_accumulation_agent(): + return """ + ### Rainfall Accumulation Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the accumulated rainfall at the user's location for the last 24 hours. + + **Goal:** + Allow the user to determine how much rain has accumulated at their location in the last 24 hours. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4) + """ + + +def cloud_coverage_forecast_agent(): + return """ + ### Cloud Coverage Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the cloud coverage forecast for the user's location for the next day. + + **Goal:** + Allow the user to determine cloud coverage for their location. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_ndfd_basic(34.730301, -86.586098, forecast_time) + """ + + +def precipitation_forecast_agent(): + return """ + ### Precipitation Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the precipitation forecast for the user's location for the next 6 hours. + + **Goal:** + Allow the user to determine if precipitation will fall in the coming hours. + + **Required Inputs:** + User's location (latitude and longitude). 
+ + **API Example:** + point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4) + """ + + +def maximum_temperature_forecast_agent(): + return """ + ### Maximum Temperature Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the maximum forecasted temperature for the user's location for today. + + **Goal:** + Allow the user to determine how hot or cold the air temperature will be. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_ndfd_basic(34.730301, -86.586098, forecast_time) + """ + + +def wind_speed_forecast_agent(): + return """ + ### Wind Speed Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the maximum wind speed forecast for the user's location for today. + + **Goal:** + Allow the user to determine the maximum wind speed for that day. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4) + """ + + +llm = llama3Hosted( + max_tokens=1000, + temperature=0.5, +) + + +# Define the agents with their specific prompts +temp_tracker = Agent( + agent_name="TempTracker", + system_prompt=current_temperature_retrieval_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[request_metar_nearest], +) + +weather_narrator = Agent( + agent_name="WeatherNarrator", + system_prompt=current_weather_description_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[request_metar_nearest], +) + +rain_gauge = Agent( + agent_name="RainGauge", + system_prompt=rainfall_accumulation_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[point_query], +) + 
+cloud_predictor = Agent( + agent_name="CloudPredictor", + system_prompt=cloud_coverage_forecast_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[request_ndfd_basic], +) + +rain_forecaster = Agent( + agent_name="RainForecaster", + system_prompt=precipitation_forecast_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[point_query_region], +) + +temp_forecaster = Agent( + agent_name="TempForecaster", + system_prompt=maximum_temperature_forecast_agent(), + llm=llm, + max_loops=1, + verbose=True, + output_type=dict, + autosave=True, + dashboard=False, + streaming_on=True, + stopping_token="", + tools=[request_ndfd_hourly], +) + +wind_watcher = Agent( + agent_name="WindWatcher", + system_prompt=wind_speed_forecast_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[point_query_region], +) + +# Create a list +agents = [ + temp_tracker, + weather_narrator, + rain_gauge, + cloud_predictor, + rain_forecaster, + temp_forecaster, + wind_watcher, +] + +# # Create a hierarchical swarm +# swarm = HiearchicalSwarm( +# name = "WeatherSwarm", +# description="A swarm of weather agents", +# agents=agents, +# director = +# ) diff --git a/playground/weatherman_agent/weather_agent.py b/playground/weatherman_agent/weather_agent.py new file mode 100644 index 00000000..998b8922 --- /dev/null +++ b/playground/weatherman_agent/weather_agent.py @@ -0,0 +1,50 @@ +from dotenv import load_dotenv +from swarms import Agent, OpenAIChat + +from weather_swarm.prompts import ( + FEW_SHORT_PROMPTS, + GLOSSARY_PROMPTS, + WEATHER_AGENT_SYSTEM_PROMPT, +) +from weather_swarm.tools.tools import ( + point_query, + request_ndfd_basic, + request_ndfd_hourly, +) + +# Load the environment variables +load_dotenv() + + +# Purpose = To generate weather 
information for the user and send API requests to the Baron Weather API +agent = Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + # sop=list_tool_schemas_json, + llm=OpenAIChat(), + max_loops=1, + # interactive=True, + dynamic_temperature_enabled=True, + verbose=True, + # Set the output type to the tool schema which is a BaseMode + output_type=str, # or dict, or str + tools=[ + # request_metar_nearest, + point_query, + request_ndfd_basic, + # point_query_region, + request_ndfd_hourly, + ], + docs_folder="datasets", # Add every document in the datasets folder + metadata="json", + function_calling_format_type="OpenAI", + function_calling_type="json", +) + +# Run the agent to generate the person's information +# Run the agent to generate the person's information +output = agent.run("Are there any chances of rain today in Huntsville?") +# # Write the output to a new file +# with open('output.txt', 'w') as f: +# f.write(str(output)) \ No newline at end of file diff --git a/playground/weatherman_agent/weather_swarm/__init__.py b/playground/weatherman_agent/weather_swarm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/playground/weatherman_agent/weather_swarm/prompts.py b/playground/weatherman_agent/weather_swarm/prompts.py new file mode 100644 index 00000000..83c32499 --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/prompts.py @@ -0,0 +1,152 @@ +GLOSSARY_PROMPTS = """ + +Glossary + +API Terminology +Access_key +A private access key or shared access key (not a secret and not an Application Key) used to access the Baron Weather API. View your access keys on your account page. + +Application Key +Users’ personal and confidential key from which access keys are derived. The application key allows management of access keys. View your application key on your account page. 
+ +Configuration_code +Configuration codes are most often used to differentiate between EPSG:3857 (Mercator) and EPSG:4326 (Geodetic) projections. In the Baron Weather API we add a descriptor to the beginning to indicate any additional parameters to the projection. The default descriptor is ‘Standard’ and will be the primary configuration used, but some data products may offer an alternative descriptor to differentiate formatting options. + +Coordinated Universal Time (UTC) +This standard organizes the data so the largest temporal term (the year) appears first in the data string and progresses to the smallest term (the second), like so 2012-12-31T18:51:23Z. + +Format +The language format for API responses. In the Baron Weather API, responses for text products can be in JSON or JSONP format, and graphical formats are always in png format. + +ISO8601 +The primary time standard by which the world regulates clocks and time. + +Max-age +It's an optional parameter for the metar, buoy, and cwop "nearest" api which allows developers to query a lat/lon and only get back data that is more recent than the prescribed date and time. + +Metadata_timestamp +The ISO 8601 UTC date/time for the data found in the returned metadata "time" parameter(s). + +Metadata_valid_time +The ISO 8601 UTC date/time for the data found in the returned metadata "valid_times" list. This is required for forecast products (those that provide a valid_times list in the metadata), but unnecessary for non-forecast products. + +Pages +The page parameter was put in place to minimize the amount of information returned in the response. Text products that support the page parameter return the current page number and the total number of pages when you make a request. Many text products provide thousands of lines of data, which can be overwhelming when users are looking for a specific piece of information for a specific time frame.
For example, a developer looking for the current weather conditions at all METAR stations will not need to have thousands of lines of text returned. Instead, we limit them to a maximum number of stations per page, then if users want the full set, they have to ask explicitly for page 2, page 3, etc. in the request URL. + +Product Code +The code to include in the API URL request that is specific to each weather product. + +Reference Time +The time the forecast model begins. In the product-instances metadata, this is called "time". + +Timestamp +The timestamp value included with the request and used to create the signature. Represented as ‘ts’ in request and always in UTC format. + +Timestep +In general, a single point in time for which the product is valid, also called "valid_times". However for accumulation products, the timesteps represent the end of a measured time interval for which total accumulated precipitation is forecast. A list of timesteps or "valid_times" are provided in the product-instances metadata. + +Timestep Interval +The interval between timesteps. + +Valid_times +The list of UTC-formatted timesteps for a forecast product when the Product Instances API is run. + +X +The x-coordinate of the requested tile. This value represents the horizontal index of the tile, assuming an origin of the lower left corner of the tile grid (0,0). These coordinates correspond to the Tile Map Service Specification. + +Y +The y-coordinate of the requested tile. This value represents the vertical index of the tile, assuming an origin of the lower left corner of the tile grid (0,0). These coordinates correspond to the Tile Map Service Specification. + +Z +The z-coordinate of the requested tile. This value represents the zoom level (depth) of the tile. A value of 0 shows the entire world using the minimum number of tiles (1 for Mercator, 2 for Geodetic). The maximum available depth may vary by product.
These coordinates correspond to the Tile Map Service Specification. + + + + +Meteorological Terminology +dBZ +Stands for decibels relative to Z. It is a meteorological measure of equivalent reflectivity (Z) of a radar signal reflected off a remote object. + +Dew Point +The temperature below which the water vapor in a volume of humid air at a constant barometric pressure will condense into liquid water. + +Heat Index +An index that combines air temperature and relative humidity in an attempt to determine the human-perceived equivalent temperature — how hot it feels. + +Infrared (IR) +In relation to satellite imagery, infrared imagery is produced by satellite analysis of infrared wavelengths. This analysis indicates the temperature of air masses, making it possible to identify cloud cover day or night. + +kft +Stands for thousands of feet. + +Relative Humidity +The ratio of the partial pressure of water vapor in an air-water mixture to the saturated vapor pressure of water at a given temperature. + +Valid Time Event Code (VTEC) +Format in which alerting information is pulled from the National Weather Service. + +Visible Satellite (VIS) +Visible satellite imagery is a snapshot of cloud cover from space. Consequently it is only usable during daylight hours. It is the easiest weather data product for laypeople to understand. + +Warnings +The NWS issues a warning when a hazardous weather or hydrologic event is occurring, is imminent, or has a very high probability of occurring. Often warnings are not issued until conditions have been visually verified. A warning is used for conditions posing a threat to life or property. + +Watches +The NWS issues a watch when the risk of a hazardous weather or hydrologic event has increased significantly, but its occurrence, location, and/or timing is still uncertain. It is intended to provide enough lead time so that those who need to set their plans in motion can do so.
+ +Water Vapor Satellite +Water vapor imagery is a satellite product which measures the amount of moisture in the atmosphere above 10,000 feet. Bright white areas indicate abundant moisture, which may be converted into clouds or precipitation. Darker areas indicate the presence of drier air. In addition to measuring moisture, water vapor imagery is useful in detecting large scale weather patterns, such as jet streams. + +Wave Dominant Period +The period in seconds between successive waves. + +Wave Height +The maximum reported or forecasted wave height. + +Wind Chill +The perceived decrease in air temperature felt by the body on exposed skin due to the flow of cold air. Wind chill temperature is defined only for temperatures at or below 10 °C (50 °F) and wind speeds above 4.8 kilometers per hour (3.0 mph). + +Wind Gust +A sudden, brief increase in speed of wind. According to US weather observing practice, gusts are reported when the peak wind speed reaches at least 16 knots and the variation in wind speed between the peaks and lulls is at least 9 knots. The duration of a gust is usually less than 20 seconds. + +""" + +WEATHER_AGENT_SYSTEM_PROMPT = """ + +You navigate through tasks efficiently. Whether you're learning something new or need assistance with daily tasks, I can provide information, suggestions, and step-by-step guidance. + +#### How I Can Help: +- **Information Retrieval:** I can fetch and summarize information on a wide range of topics. +- **Problem Solving:** I offer solutions and strategies to address specific challenges. +- **Learning Support:** I assist in understanding new concepts and procedures. + +#### Example: Using the Baron Weather API + +Let's look at how you can use the Baron Weather API to retrieve weather data, which involves making authenticated HTTP requests. + +1. **Understand Your Needs**: Identify what specific weather data you need, such as current conditions or a forecast. +2. 
**Gather API Details**: Know your API key, the endpoints available, and the data format (JSON). +3. **Authentication**: Learn how to authenticate your requests using your API key and additional security measures as required (like generating signatures). +4. **Craft the Request**: Construct the correct HTTP request to fetch the data you need. +5. **Parse the Response**: After making the request, interpret the JSON response to extract and utilize the weather data. + +Through each step, I can provide explanations, code snippets, and troubleshooting tips to ensure you successfully achieve your goal. + +### Conclusion + +With these steps, you'll be better prepared to use tools like APIs effectively and get the most out of our interactions. If you have questions or need further assistance, feel free to ask! + +--- + +""" + + +FEW_SHORT_PROMPTS = """ +What is the current temperature? allow the user to request the current temperature for their location user's location request_metar_nearest("38", "-96") +Describe the current weather. have the LLM construct a narrative weather description based on current conditions user's location request_metar_nearest("38", "-96") +How much rain fell at my location? allow the user to determine how much rain has accumulated at their location in the last 24 hours user's location point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4) +Is it going to be sunny tomorrow? allow the user to determine cloud coverage for their location user's location request_ndfd_basic(34.730301, -86.586098, forecast_time) +Is rain expected at my location in the next 6 hours? allow the user to determine if precip will fall in the coming hours user's location point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4) +What is the max forecasted temperature today? allow the user to determine how hot or cold the air temp will be user's location request_ndfd_basic(34.730301, -86.586098, forecast_time) +Will it be windy today? 
allow the user to determine the max wind speed for that day user's location point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4) +""" diff --git a/playground/weatherman_agent/weather_swarm/tools/__init__.py b/playground/weatherman_agent/weather_swarm/tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py b/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py new file mode 100644 index 00000000..fba0361b --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py @@ -0,0 +1,145 @@ +from pydantic import BaseModel, Field + + +class RequestMetarNearest(BaseModel): + latitude: str = Field( + ..., + description=( + "The latitude of the location for which the nearest METAR" + " station is requested." + ), + ) + longitude: str = Field( + ..., + description=( + "The longitude of the location for which the nearest" + " METAR station is requested." + ), + ) + + +class PointQueryPrecipTotalAccum24Hr(BaseModel): + layer: str = Field( + ..., + description=( + "The layer of the precipitation total accumulation in the" + " last 24 hours." + ), + ) + projection: str = Field( + ..., + description=( + "The projection of the location for which the" + " precipitation total accumulation is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the" + " precipitation total accumulation is requested." + ), + ) + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the precipitation" + " total accumulation is requested." + ), + ) + + +class RequestNDFDBasic(BaseModel): + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the NDFD basic" + " forecast is requested." 
+ ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the NDFD basic" + " forecast is requested." + ), + ) + forecast_time: str = Field( + ..., + description=( + "The forecast time for which the NDFD basic forecast is" + " requested." + ), + ) + + +class PointQueryBaronHiresMaxReflectivityDbzAll(BaseModel): + layer: str = Field( + ..., + description=( + "The layer of the maximum reflectivity in dBZ for all" + " heights." + ), + ) + projection: str = Field( + ..., + description=( + "The projection of the location for which the maximum" + " reflectivity is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the maximum" + " reflectivity is requested." + ), + ) + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the maximum" + " reflectivity is requested." + ), + ) + + +class PointQueryBaronHiresWindSpeedMph10Meter(BaseModel): + layer: str = Field( + ..., + description=( + "The layer of the wind speed in mph at 10 meters above" + " ground level." + ), + ) + projection: str = Field( + ..., + description=( + "The projection of the location for which the wind speed" + " is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the wind speed" + " is requested." + ), + ) + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the wind speed is" + " requested." 
+ ), + ) + + +def _remove_a_key(d: dict, remove_key: str) -> None: + """Remove a key from a dictionary recursively""" + if isinstance(d, dict): + for key in list(d.keys()): + if key == remove_key and "type" in d.keys(): + del d[key] + else: + _remove_a_key(d[key], remove_key) diff --git a/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py b/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py new file mode 100644 index 00000000..a5d5f1ba --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py @@ -0,0 +1,109 @@ +import requests +from typing import List, Dict, Any + + +def fetch_geocode_by_city( + api_key: str, city: str, timestamp: int, signature: str +) -> List[Dict[str, Any]]: + """ + Fetch geocode data by city name. + + Args: + api_key (str): The API key for authentication. + city (str): The name of the city (e.g., "Austin, Tx"). + timestamp (int): The timestamp for the request. + signature (str): The signature for the request. + + Returns: + List[Dict[str, Any]]: Geocode data for the specified city. + + Raises: + Exception: If the request fails or the response is invalid. + """ + url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/city.json" + params = {"name": city, "ts": timestamp, "sig": signature} + try: + response = requests.get(url, params=params) + response.raise_for_status() + data = response.json() + return data.get("geocode", {}).get("data", []) + except requests.RequestException as e: + raise Exception(f"Failed to fetch geocode data by city: {e}") + except ValueError: + raise Exception("Invalid response format.") + + +def fetch_geocode_by_address( + api_key: str, address: str, timestamp: int, signature: str +) -> List[Dict[str, Any]]: + """ + Fetch geocode data by address. + + Args: + api_key (str): The API key for authentication. + address (str): The address (e.g., "3305 Northland Dr, Austin, Tx"). + timestamp (int): The timestamp for the request. 
# --- playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py ---
# NOTE(review): `requests`, `List`, `Dict`, `Any` are imported at the top of
# this module, outside this hunk.
def fetch_geocode_by_zip(
    api_key: str,
    zip_code: str,
    us: int,
    timestamp: int,
    signature: str,
) -> List[Dict[str, Any]]:
    """
    Fetch geocode data for a zip code from the Velocity Weather API.

    Args:
        api_key (str): The API key for authentication.
        zip_code (str): The zip code (e.g., "13060").
        us (int): Indicator for US zip code (1 for US, 0 for other).
        timestamp (int): The timestamp for the request.
        signature (str): The signature for the request.

    Returns:
        List[Dict[str, Any]]: Geocode data for the specified zip code.

    Raises:
        Exception: If the request fails or the response is invalid.
    """
    url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/zip.json"
    params = {
        "zip": zip_code,
        "us": us,
        "ts": timestamp,
        "sig": signature,
    }
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        return data.get("geocode", {}).get("data", [])
    except requests.RequestException as e:
        raise Exception(
            f"Failed to fetch geocode data by zip code: {e}"
        )
    except ValueError:
        raise Exception("Invalid response format.")


# --- playground/weatherman_agent/weather_swarm/tools/tools.py ---
# coding: utf-8

import base64
import hashlib
import hmac
import shutil
import time
from urllib.request import urlopen
from urllib.request import Request
from urllib.error import URLError
import os
import json
import codecs
import datetime

from typing import Union

# Optional dependencies: fall back to no-ops when they are not installed so
# the module can still be imported in minimal environments.
try:
    from dotenv import load_dotenv
except ImportError:
    def load_dotenv() -> bool:
        """No-op fallback when python-dotenv is not installed."""
        return False

try:
    from typeguard import typechecked
except ImportError:
    def typechecked(func):
        """No-op fallback decorator when typeguard is not installed."""
        return func

load_dotenv()

latin1 = codecs.lookup("latin-1")

# NOTE(review): committed credential fallbacks are a security risk; these
# default values should be removed so the environment must supply them.
host = os.environ.get(
    "BARON_API_HOST", "http://api.velocityweather.com/v1"
)
access_key = os.environ.get("BARON_ACCESS_KEY", "Y5lHXZfgce7P")
access_key_secret = os.environ.get(
    "BARON_ACCESS_KEY_SECRET",
    "rcscpInzyLuweENUjUtFDmqLkK1N0EPeaWQRjy7er1",
)


@typechecked
def a2w(a: bytes) -> str:
    """
    Decode a Latin-1 byte string to text.

    The codec's decode() returns a (text, bytes_consumed) tuple; element [0]
    is the full decoded string (not just its first character).

    Args:
        a (bytes): The byte string to decode.

    Returns:
        str: The decoded string.
    """
    return latin1.decode(a)[0]


@typechecked
def sig(key: str, secret: str) -> str:
    """
    Generate a "sig={signature}&ts={timestamp}" fragment by HMAC-SHA1
    signing "{key}:{timestamp}" and URL-safe base64 encoding the digest
    (with '=' padding pre-escaped as %3D).

    Args:
        key (str): The access key.
        secret (str): The access key secret.

    Returns:
        str: The signed fragment "sig={signature}&ts={timestamp}".
    """
    ts = "{:.0f}".format(time.time())
    to_sign = key + ":" + ts
    hashval = hmac.new(
        secret.encode("utf-8"), to_sign.encode("utf-8"), hashlib.sha1
    )
    sig = a2w(
        base64.urlsafe_b64encode(hashval.digest()).replace(
            b"=", b"%3D"
        )
    )
    return "sig={}&ts={}".format(sig, ts)


@typechecked
def sign_request(url: str, key: str, secret: str) -> str:
    """
    Append a signature and timestamp to a URL.

    Args:
        url (str): The URL to sign.
        key (str): The access key.
        secret (str): The access key secret.

    Returns:
        str: The URL with "sig" and "ts" query parameters appended
        (using "?" or "&" depending on whether a query string exists).
    """
    # (A redundant second docstring statement was removed here.)
    signature = sig(key, secret)
    q = "?" if url.find("?") == -1 else "&"
    url += "{}{}".format(q, signature)
    return url


########## [START] API REQUESTS ##########
@typechecked
def request_pointquery_nws_watches_warning_all() -> str:
    """
    Build a signed URL querying all NWS watches/warnings for a fixed point.

    Returns:
        str: The signed URL for the point query.
    """
    uri = "/reports/alert/all-poly/point.json?lat=29.70&lon=-80.41"
    url = "%s/%s%s" % (host, access_key, uri)
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_lightning_count() -> str:
    """
    Build a signed URL querying the lightning strike count for a fixed region.

    Returns:
        str: The signed URL for the lightning count query.
    """
    uri = "/reports/lightning/count/region.json?w_lon=-160&e_lon=0&n_lat=-2&s_lat=-70"
    url = "%s/%s%s" % (host, access_key, uri)
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_storm_vector(sitecode: str) -> str:
    """
    Build a signed URL querying the storm vector for a station.

    Args:
        sitecode (str): The station code to query.

    Returns:
        str: The signed URL for the storm vector query.
    """
    uri = "/reports/stormvector/station/%s.json" % (sitecode)
    url = "%s/%s%s" % (host, access_key, uri)
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_geocodeip() -> str:
    """
    Build a signed URL that geocodes the requester's IP address.

    Returns:
        str: The signed URL for the geocode IP query.
    """
    uri = "/reports/geocode/ipaddress.json"
    url = "%s/%s%s" % (host, access_key, uri)
    # Bug fix: the URL was previously signed twice (sign_request was called
    # again on an already-signed URL), producing duplicate sig/ts query
    # parameters. Sign exactly once.
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_forecast(lat: float, lon: float) -> dict:
    """
    Fetch a 7-day basic point forecast for a coordinate.

    Args:
        lat (float): Latitude of the point.
        lon (float): Longitude of the point.

    Returns:
        dict: Forecast data on success, otherwise an empty dict.
    """
    uri = "/reports/pointforecast/basic.json?days=7&lat={}&lon={}".format(lat, lon)
    url = "%s/%s%s" % (host, access_key, uri)
    url = sign_request(url, access_key, access_key_secret)

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return {}
    except ValueError as e:
        print(e)
        return {}

    assert response.code == 200
    data = json.loads(response.read())

    forecast_data = data.get("pointforecast_basic", {}).get("data", {})
    if isinstance(forecast_data, dict):
        return forecast_data
    else:
        # Wrap non-dict payloads so the declared dict return type holds.
        return {"forecast_data": forecast_data}


@typechecked
def request_metar_northamerica() -> None:
    """
    Download all North-America METARs (paged), attach a point forecast to
    each station, and dump the result to ./metar.json.

    Returns:
        None
    """
    uri = "/reports/metar/region.json?n_lat=51.618017&s_lat=23.241346&w_lon=-129.375000&e_lon=-60.644531"
    url = "%s/%s%s" % (host, access_key, uri)
    url = sign_request(url, access_key, access_key_secret)

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return

    assert response.code == 200
    data = json.loads(response.read())

    metars = {}
    pages = data["metars"]["meta"]["pages"]

    print("processing {} pages of METAR data".format(pages))

    for i in range(1, pages + 1):
        print("processing page {}".format(i))
        page_url = url + "&page={}".format(i)
        try:
            response = urlopen(page_url)
        except URLError as e:
            print(e)
            return
        except ValueError as e:
            print(e)
            return

        assert response.code == 200
        data = json.loads(response.read())
        for metar in data["metars"]["data"]:
            siteid = metar["station"]["id"]
            print("processing site {}".format(siteid))
            # Station coordinates are [lon, lat]; request_forecast takes (lat, lon).
            forecast = request_forecast(
                metar["station"]["coordinates"][1],
                metar["station"]["coordinates"][0],
            )

            metars[siteid] = {"metar": metar, "forecast": forecast}

    with open("metar.json", "w") as metar_jsonfile:
        json.dump(metars, metar_jsonfile, indent=4, sort_keys=True)
@typechecked
def request_metar_nearest(lat: str, lon: str) -> str:
    """
    Build a signed URL for the METAR report nearest to a coordinate
    (within 500 km, at most 75 minutes old).

    Args:
        lat (str): Latitude, already formatted for the query string.
        lon (str): Longitude, already formatted for the query string.

    Returns:
        str: The signed request URL.
    """
    # Consistency fix: the "-> str" return annotation was missing, unlike
    # every sibling request builder in this module.
    uri = (
        "/reports/metar/nearest.json?lat=%s&lon=%s&within_radius=500&max_age=75"
        % (
            lat,
            lon,
        )
    )
    url = "%s/%s%s" % (host, access_key, uri)
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_metar(station_id: str) -> str:
    """
    Build a signed URL for the METAR report of a single station.

    Args:
        station_id (str): Station identifier (e.g., "egll").

    Returns:
        str: The signed URL for the METAR query.
    """
    uri = "/reports/metar/station/%s.json" % station_id
    url = "%s/%s%s" % (host, access_key, uri)
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_ndfd_hourly(lat: float, lon: float, utc_datetime: datetime.datetime) -> str:
    """
    Build a signed URL for NDFD hourly data at a coordinate and time.

    Args:
        lat (float): Latitude of the point.
        lon (float): Longitude of the point.
        utc_datetime (datetime.datetime): Request time; assumed to already be
            UTC — a "Z" suffix is appended without conversion.

    Returns:
        str: The signed URL for the request.
    """
    # Truncate microseconds and mark the timestamp as UTC.
    datetime_str = (
        utc_datetime.replace(microsecond=0).isoformat() + "Z"
    )
    uri = f"/reports/ndfd/hourly.json?lat={lat}&lon={lon}&utc={datetime_str}"
    url = f"{host}/{access_key}{uri}"
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_ndfd_basic(lat: float, lon: float, utc_datetime: datetime.datetime) -> str:
    """
    Build a signed URL for a 7-day NDFD basic forecast at a coordinate.

    Args:
        lat (float): Latitude of the point.
        lon (float): Longitude of the point.
        utc_datetime (datetime.datetime): Request time; assumed to already be
            UTC — a "Z" suffix is appended without conversion.

    Returns:
        str: The signed URL for the request.
    """
    datetime_str = (
        utc_datetime.replace(microsecond=0).isoformat() + "Z"
    )
    uri = f"/reports/ndfd/basic.json?lat={lat}&lon={lon}&utc={datetime_str}&days=7"
    url = f"{host}/{access_key}{uri}"
    return sign_request(url, access_key, access_key_secret)


@typechecked
def request_tile(product: str, product_config: str, z: int, x: int, y: int) -> None:
    """
    Fetch one TMS tile for the most recent instance of a product and save it
    as ./tms_img_{product}_{product_config}.png.

    Args:
        product (str): The product code.
        product_config (str): The product configuration.
        z (int): Zoom level.
        x (int): Tile x coordinate.
        y (int): Tile y coordinate.

    Returns:
        None
    """
    url = "%s/%s/meta/tiles/product-instances/%s/%s" % (
        host,
        access_key,
        product,
        product_config,
    )
    url = sign_request(url, access_key, access_key_secret)

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200
    data = json.loads(response.read())

    # Select the most recent product instance for this example.
    product_instance = data[0]

    url = "%s/%s/tms/1.0.0/%s+%s+%s/%d/%d/%d.png" % (
        host,
        access_key,
        product,
        product_config,
        product_instance["time"],
        z,
        x,
        y,
    )

    try:
        # If it's a forecast product, it will have valid_times. The latest one is used for this example.
        url += "?valid_time={}".format(
            product_instance["valid_times"][0]
        )
    except KeyError:
        pass

    url = sign_request(url, access_key, access_key_secret)
    print(url)
    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    print("headers:")
    print(
        json.dumps(
            response.headers._headers, indent=4, sort_keys=True
        )
    )

    content = response.read()
    filename = "./tms_img_{}_{}.png".format(product, product_config)
    print(
        "Read {} bytes, saving as {}".format(len(content), filename)
    )
    with open(filename, "wb") as f:
        f.write(content)


@typechecked
def point_query(product: str, product_config: str, lon: float, lat: float) -> None:
    """
    Query the most recent instance of a product at a single point and print
    the signed URL, response headers and JSON content.

    Args:
        product (str): The product code.
        product_config (str): The product configuration.
        lon (float): Longitude of the point.
        lat (float): Latitude of the point.

    Returns:
        None
    """
    # Get the list of product instances.
    url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format(
        host=host,
        key=access_key,
        product=product,
        product_config=product_config,
    )
    url = sign_request(url, access_key, access_key_secret)
    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200
    data = json.loads(response.read())

    # Select the most recent product instance for this example.
    product_instance = data[0]

    # Query our lon, lat point.
    url = "{host}/{key}/point/{product}/{product_config}/{product_instance}.{file_type}?lon={lon}&lat={lat}".format(
        host=host,
        key=access_key,
        product=product,
        product_config=product_config,
        product_instance=product_instance["time"],
        file_type="json",
        lon=lon,
        lat=lat,
    )

    try:
        if product_instance["valid_times"][0]:
            # If it's a forecast product, it will have valid_times. Display them all
            url += "&valid_time=*"

            # If it's a forecast product, it will have valid_times. The latest one is used for this example.
            # url += '&valid_time={}'.format(product_instance['valid_times'][0])

    except KeyError:
        pass

    url = sign_request(url, access_key, access_key_secret)
    print(url)

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    content = response.read()
    charset = response.headers.get_param("charset")
    if charset:
        content = content.decode(charset)
    content = json.loads(content)

    print("headers:")
    print(
        json.dumps(
            response.headers._headers, indent=4, sort_keys=True
        )
    )
    print("content:")
    print(
        json.dumps(
            content, indent=4, sort_keys=True, ensure_ascii=False
        )
    )
@typechecked
def point_query_multi(product: str, product_config: str, points: 'list[tuple[float, float]]') -> None:
    """
    Query the most recent instance of a product at several points at once and
    print the signed URL, response headers and JSON content.

    Args:
        product (str): The product code.
        product_config (str): The product configuration.
        points (list[tuple[float, float]]): (lon, lat) pairs to query.

    Returns:
        None
    """
    # (A redundant duplicate docstring statement was removed here.)
    # Get the list of product instances.
    url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format(
        host=host,
        key=access_key,
        product=product,
        product_config=product_config,
    )
    url = sign_request(url, access_key, access_key_secret)
    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200
    data = json.loads(response.read())

    # Select the most recent product instance for this example.
    product_instance = data[0]

    def format_point(_p, _decimals=3):
        # "lon,lat" with each coordinate rounded to _decimals places.
        return ",".join(str(round(_, _decimals)) for _ in _p)

    # Query our list of lon, lat points
    url = "{host}/{key}/point/multi/{product}/{product_config}/{product_instance}.{file_type}?points={points}".format(
        host=host,
        key=access_key,
        product=product,
        product_config=product_config,
        product_instance=product_instance["time"],
        file_type="json",
        points="|".join(format_point(_) for _ in points),
    )

    try:
        # If it's a forecast product, it will have valid_times. The latest one is used for this example.
        url += "&valid_time={}".format(
            product_instance["valid_times"][0]
        )
    except KeyError:
        pass

    url = sign_request(url, access_key, access_key_secret)
    print(url)

    try:
        request = Request(url, headers={"Accept-Encoding": "gzip"})
        response = urlopen(request)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    if response.headers.get("Content-Encoding") == "gzip":
        import gzip
        import io

        compressed_file = io.BytesIO(response.read())
        decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode="rb")
        content = decompressed_file.read()
    else:
        content = response.read()

    charset = response.headers.get_param("charset")
    if charset:
        content = content.decode(charset)
    content = json.loads(content)

    print("headers:")
    print(
        json.dumps(
            response.headers._headers, indent=4, sort_keys=True
        )
    )
    print("content:")
    print(
        json.dumps(
            content, indent=4, sort_keys=True, ensure_ascii=False
        )
    )


@typechecked
def point_query_region(product: str, product_config: str, n_lat: float, s_lat: float, w_lon: float, e_lon: float) -> None:
    """
    Query the most recent instance of a product over a lat/lon region and
    print the signed URL, response headers and JSON content.

    Args:
        product (str): The product code.
        product_config (str): The product configuration.
        n_lat (float): Northern latitude of the region.
        s_lat (float): Southern latitude of the region.
        w_lon (float): Western longitude of the region.
        e_lon (float): Eastern longitude of the region.

    Returns:
        None
    """
    # (A redundant duplicate docstring statement was removed here.)
    # Get the list of product instances.
    url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format(
        host=host,
        key=access_key,
        product=product,
        product_config=product_config,
    )

    url = sign_request(url, access_key, access_key_secret)
    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200
    data = json.loads(response.read())

    # Select the most recent product instance for this example.
    product_instance = data[0]

    def format_value(_, _decimals=3):
        # Coordinate rounded to _decimals places, rendered as a string.
        return str(round(_, _decimals))

    # Query our region
    url = "{host}/{key}/point/region/{product}/{product_config}/{product_instance}.{file_type}?n_lat={n_lat}&s_lat={s_lat}&w_lon={w_lon}&e_lon={e_lon}".format(
        host=host,
        key=access_key,
        product=product,
        product_config=product_config,
        product_instance=product_instance["time"],
        file_type="json",
        n_lat=format_value(n_lat),
        s_lat=format_value(s_lat),
        w_lon=format_value(w_lon),
        e_lon=format_value(e_lon),
    )

    try:
        # If it's a forecast product, it will have valid_times. The latest one is used for this example.
        url += "&valid_time={}".format(
            product_instance["valid_times"][0]
        )
    except KeyError:
        pass

    url = sign_request(url, access_key, access_key_secret)
    print(url)

    try:
        request = Request(url, headers={"Accept-Encoding": "gzip"})
        response = urlopen(request)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    if response.headers.get("Content-Encoding") == "gzip":
        import gzip
        import io

        compressed_file = io.BytesIO(response.read())
        decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode="rb")
        content = decompressed_file.read()
    else:
        content = response.read()

    charset = response.headers.get_param("charset")
    if charset:
        content = content.decode(charset)
    content = json.loads(content)

    print("headers:")
    print(
        json.dumps(
            response.headers._headers, indent=4, sort_keys=True
        )
    )
    print("content:")
    print(
        json.dumps(
            content, indent=4, sort_keys=True, ensure_ascii=False
        )
    )


@typechecked
def request_wms_capabilities(product: str, product_config: str) -> None:
    """
    Request the WMS GetCapabilities document for a product and print it.

    Args:
        product (str): The product code.
        product_config (str): The product configuration.

    Returns:
        None
    """
    url = "{}/{}/wms/{}/{}?VERSION=1.3.0&SERVICE=WMS&REQUEST=GetCapabilities".format(
        host, access_key, product, product_config
    )
    url = sign_request(url, access_key, access_key_secret)
    print(url)

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    content = response.read()
    print(content)


@typechecked
def request_wms(product: str, product_config: str, image_size_in_pixels: 'list[int]', image_bounds: 'list[float]') -> None:
    """
    Request a WMS image for the most recent product instance and save it to
    disk in the current directory.

    Args:
        product (str): The product code, such as 'C39-0x0302-0'.
        product_config (str): The product configuration, such as
            'Standard-Mercator' or 'Standard-Geodetic'.
        image_size_in_pixels (list[int]): Image [width, height] in pixels.
        image_bounds (list[float]): The bounds of the image:
            - Mercator (EPSG:3857): coordinates in meters, ordered
              [xmin, ymin, xmax, ymax]. The whole world is
              [-20037508.342789244, -20037508.342789244,
              20037508.342789244, 20037508.342789244]; y is clipped at
              ~±85.05112877980659° latitude, giving a perfect square.
            - Geodetic (EPSG:4326): coordinates in decimal degrees, ordered
              [lat_min, lon_min, lat_max, lon_max]; the whole world is
              [-90, -180, 90, 180].

        Keep image_size_in_pixels proportional to image_bounds: mismatched
        proportions produce a distorted (stretched or squished) image with
        no interpolation.

    Returns:
        None
    """
    # Convert the image bounds to a comma-separated string.
    image_bounds_str = ",".join(str(x) for x in image_bounds)

    # We're using the TMS-style product instances API here for simplicity;
    # a WMS-style product-instance API also exists (see documentation).
    # "page_size" limits the list to the most recent instance.
    meta_url = (
        "{}/{}/meta/tiles/product-instances/{}/{}?page_size=1".format(
            host, access_key, product, product_config
        )
    )
    meta_url = sign_request(meta_url, access_key, access_key_secret)

    try:
        response = urlopen(meta_url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    # Decode the product instance response and get the most recent product
    # instance time, to be used in the WMS image request.
    content = json.loads(response.read())
    product_instance = content[0]

    # WMS uses EPSG codes, while our product configuration code uses
    # 'Geodetic' or 'Mercator'; map between the two for the CRS parameter.
    epsg_code = (
        "EPSG:4326"
        if product_config.endswith("-Geodetic")
        else "EPSG:3857"
    )

    # NOTE(review): LAYERS is filled with the product-instance time; this
    # matches the upstream example but looks surprising — confirm against
    # the Baron WMS documentation.
    wms_url = "{}/{}/wms/{}/{}?VERSION=1.3.0&SERVICE=WMS&REQUEST=GetMap&CRS={}&LAYERS={}&BBOX={}&WIDTH={}&HEIGHT={}".format(
        host,
        access_key,
        product,
        product_config,
        epsg_code,
        product_instance["time"],
        image_bounds_str,
        image_size_in_pixels[0],
        image_size_in_pixels[1],
    )

    try:
        # If it's a forecast product, it will have valid_times. The latest one is used for this example.
        wms_url += "&TIME={}".format(
            product_instance["valid_times"][0]
        )
    except KeyError:
        pass

    wms_url = sign_request(wms_url, access_key, access_key_secret)
    print(wms_url)

    try:
        response = urlopen(wms_url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    content = response.read()
    filename = "./wms_img_{}_{}.png".format(product, product_config)
    print(
        "Read {} bytes, saving as {}".format(len(content), filename)
    )
    with open(filename, "wb") as f:
        f.write(content)
@typechecked
def request_geotiff(product: str, product_config: str, product_instance: str = "") -> 'tuple[str, dict] | None':
    """
    Download the GeoTIFF for a product instance (the most recent instance
    when none is given) and save it to disk.

    Args:
        product (str): The product code.
        product_config (str): The product configuration.
        product_instance (str, optional): Product instance time. Defaults to
            "" (use the most recent instance).

    Returns:
        tuple[str, dict] | None: The saved filename and the instance's valid
        times, or None when any request fails. (Annotation fix: the failure
        paths return None, which the previous 'tuple[str, dict]' annotation
        excluded and @typechecked would have rejected.)
    """
    if not product_instance:
        # For this example, we use the optional parameter "page_size" to
        # limit the list of product instances to the most recent instance.
        meta_url = "{}/{}/meta/tiles/product-instances/{}/{}?page_size=1".format(
            host, access_key, product, product_config
        )
        meta_url = sign_request(
            meta_url, access_key, access_key_secret
        )

        try:
            response = urlopen(meta_url)
        except URLError as e:
            print(e)
            return
        except ValueError as e:
            print(e)
            return
        assert response.code == 200

        # Decode the product instance response and get the most recent
        # product instance time, to be used in the geotiff request.
        content = json.loads(response.read())
        product_instance = content[0]["time"]

    url = "/".join(
        [
            host,
            access_key,
            "geotiff",
            product,
            product_config,
            product_instance,
        ]
    )
    url = sign_request(url, access_key, access_key_secret)
    print(url)

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    content = json.loads(response.read())
    url = content["source"]

    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200

    filename = "./{}.tif".format(
        "_".join([product, product_config, product_instance])
    )
    with open(filename, "wb") as f:
        # The geotiffs can be very large, so we don't want to read the
        # http body entirely into memory before writing -- copy it directly
        # to a file instead.
        shutil.copyfileobj(response, f)
    return filename, content.get("valid_times", {})


@typechecked
def bgfs_basic(lon: float, lat: float, date: Union[datetime.date, datetime.datetime], days: int = 1) -> None:
    """
    Fetch BGFS basic reports for a point and date and pretty-print the JSON.

    Args:
        lon (float): Longitude of the point.
        lat (float): Latitude of the point.
        date (datetime.date | datetime.datetime): The report date (the
            docstring previously claimed datetime only; dates are accepted).
        days (int, optional): Number of days requested. Defaults to 1.

    Returns:
        None
    """
    url = "{host}/{key}/reports/bgfs/basic?lon={lon}&lat={lat}&utc={utc}&days={days}".format(
        host=host,
        key=access_key,
        lon=lon,
        lat=lat,
        utc=date.strftime("%Y-%m-%d"),
        days=days,
    )
    url = sign_request(url, access_key, access_key_secret)
    try:
        response = urlopen(url)
    except URLError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return
    assert response.code == 200
    content = json.loads(response.read())

    # Convert back to json only so we can let the json library format the
    # response for pretty display.
    print(
        json.dumps(
            content, indent=4, sort_keys=True, ensure_ascii=False
        )
    )
+ + Returns: + None + """ + + url = "{host}/{key}/reports/bgfs/basic?lon={lon}&lat={lat}&utc={utc}&days={days}".format( + host=host, + key=access_key, + lon=lon, + lat=lat, + utc=date.strftime("%Y-%m-%d"), + days=days, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + content = json.loads(response.read()) + + # Convert back to json only so we can let the json library format the + # response for pretty display. + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def bgfs_extended(lon: float, lat: float, date: Union[datetime.date, datetime.datetime], days: int = 1) -> None: + + """ + Fetches extended weather reports using the BGFS API. + + Args: + lon (float): The longitude of the location. + lat (float): The latitude of the location. + date (datetime.datetime): The date for which the weather reports are requested. + days (int, optional): The number of days for which the weather reports are requested. Defaults to 1. + + Returns: + None + + Raises: + URLError: If there is an error in the URL request. + ValueError: If there is an error in the URL parameters. + """ + + url = "{host}/{key}/reports/bgfs/extended?lon={lon}&lat={lat}&utc={utc}&days={days}".format( + host=host, + key=access_key, + lon=lon, + lat=lat, + utc=date.strftime("%Y-%m-%d"), + days=days, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + content = json.loads(response.read()) + + # Convert back to json only so we can let the json library format the + # response for pretty display. 
+ print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def bgfs_hourly(lon: float, lat: float, date_hour: Union[datetime.date, datetime.datetime], hours: int = 1) -> None: + """ + Fetches hourly weather reports from the BGFS API for the given longitude, latitude, and date hour. + + Args: + lon (float): The longitude of the location. + lat (float): The latitude of the location. + date_hour (datetime.datetime): The date and hour for which to fetch the weather reports. + hours (int, optional): The number of hours of weather reports to fetch. Defaults to 1. + + Returns: + None + + Raises: + URLError: If there is an error in the URL request. + ValueError: If there is an error in the URL parameters. + """ + + url = "{host}/{key}/reports/bgfs/hourly?lon={lon}&lat={lat}&utc={utc}&hours={hours}".format( + host=host, + key=access_key, + lon=lon, + lat=lat, + utc=date_hour.strftime("%Y-%m-%dT%H:%M:%SZ"), + hours=hours, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + content = json.loads(response.read()) + + # Convert back to json only so we can let the json library format the + # response for pretty display. + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +from typing import Iterator, Dict +@typechecked +def iter_product_instances(product: str, product_config: str, request_limit: int = 100) -> Iterator[Dict]: + """ + Iterate over all available product instances, one by one, using a + configurable number of instances per request. + + Args: + product (str): The product code. + product_config (str): The product configuration. + request_limit (int, optional): The number of instances to request per API call. Defaults to 100. + + Yields: + dict: A product instance. 
+ + Returns: + None + """ + + url_template = ( + "{}/{}/meta/tiles/product-instances/{}/{}?limit={}".format( + host, access_key, product, product_config, request_limit + ) + ) + url = url_template + + request_count = 0 + content_count = 0 + while content_count < request_limit: + signed_url = sign_request(url, access_key, access_key_secret) + request_count += 1 + try: + response = urlopen(signed_url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + content = json.loads(response.read()) + for item in content: + yield item + + content_count += len(content) + + if len(content) < request_limit: + # We didn't get a full page, so we must be on the last page and + # therefore -- finished. + print( + "Request count: {}. Instance count: {}.".format( + request_count, + (request_count - 1) * request_limit + + len(content), + ) + ) + return + url = "{}&older_than={}".format( + url_template, content[-1]["time"] + ) + + +def test_api_calls(): + url = request_metar_nearest("38", "-96") + print("*** request METAR nearest ***") + print(url) + print(urlopen(url).read()) + print("") + + point_query( + "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4 + ) + + forecast_time = datetime.datetime.utcnow() + url = request_ndfd_basic(34.730301, -86.586098, forecast_time) + print("*** request NDFD hourly ***") + print(url) + print(urlopen(url).read()) + print("") + + # /point/baron-hires-temp-f-2meter/Standard-Mercator/2024-05-02T12%3A00%3A00Z.jsonp?callback=_jqjsp&lat=30.173624550358536&lon=-95.3009033203125&ts=1714685100&sig=IOUh5xEZzyRqzT1MQctn1vxSqXM=&valid_time=* + point_query( + "baron-hires-maxreflectivity-dbz-all", + "Mask1-Mercator", + -86.6, + 34.4, + ) + + point_query( + "baron-hires-windspeed-mph-10meter", + "Standard-Mercator", + -86.6, + 34.4, + ) + + # Get all product instances for a product. 
+ for i, instance in enumerate(iter_product_instances('C39-0x0302-0', 'Standard-Mercator')): + print(type(instance)) + print('{:>3} {}'.format(i, instance['time'])) + + # Or, alternatively, get the product instances using a wms-style request. + request_wms_capabilities('C39-0x0302-0', 'Standard-Mercator') + + # Request the whole world in the EPSG:4326 projection. Note that the proportions for + # the image size in pixels and the image bounds are identical (2:1). + request_wms('C39-0x0302-0', 'Standard-Geodetic', [2048, 1024], [-90.0, -180.0, 90.0, 180.0]) + + # Request the whole world in the EPSG:3857 projection. Note that the proportions for + # the image size in pixels and the image bounds are identical (1:1). + request_wms('C39-0x0302-0', 'Standard-Mercator', [2048, 2048], [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]) + + filename, valid_times = request_geotiff('C39-0x0302-0', 'Standard-Mercator') + + + print("*** request point query ***") + point_query('C09-0x0331-0', 'Standard-Mercator', -86, 34) + print("") + + + # print("*** requesting METARS and Forecasts for North America ***") + # request_metar_northamerica() + # print("") + + + url = request_metar("egll") + print("*** request METAR ***") + print(url) + print(urlopen(url).read()) + print("") + + forecast_time = datetime.datetime.utcnow() + datetime.timedelta(hours=4) + url = request_ndfd_hourly(34.730301, -86.586098, forecast_time) + print("*** request NDFD hourly ***") + print(url) + print(urlopen(url).read()) + print("") + + request_tile("C39-0x0302-0", "Standard-Mercator", 1, 0, 1) + url = request_storm_vector("mhx") + print("*** request storm vectors ***") + print(url) + a = urlopen(url) + print('JSON for storm vectors is %d bytes' % len(urlopen(url).read())) + print("") + url = request_geocodeip() + print("*** geocode IP address ***") + print(url) + print(urlopen(url).read()) + print("") + url = request_lightning_count() + print("*** lightning count ***") + 
print(url) + print(urlopen(url).read()) + print("") + + date = datetime.datetime.now().date() + datetime.timedelta(days=1) + bgfs_basic(-86.6, 34.4, date, 1) + bgfs_extended(-86.6, 34.4, date, 1) + bgfs_hourly(-86.6, 34.4, datetime.datetime.combine(date, datetime.time(hour=6)), 1) + print("") + + point_query('C09-0x0331-0', 'Standard-Mercator', -86.6, 34.4) + point_query_multi('C09-0x0331-0', 'Standard-Mercator', [(-86.6, 34.4), (-90.14, 38)]) + point_query_region('C09-0x0331-0', 'Standard-Mercator', 34.4, 34.1, -86.6, -86.5) + + + + +# if __name__ == "__main__": +# main() diff --git a/swarms/structs/concat.py b/swarms/structs/concat.py new file mode 100644 index 00000000..6ed2c608 --- /dev/null +++ b/swarms/structs/concat.py @@ -0,0 +1,24 @@ +from typing import List + + +def concat_strings(string_list: List[str]) -> str: + """ + Concatenates a list of strings into a single string. + + Args: + string_list (List[str]): A list of strings to be concatenated. + + Returns: + str: The concatenated string. + + Raises: + TypeError: If the input is not a list of strings. 
+ + """ + if not isinstance(string_list, list): + raise TypeError("Input must be a list of strings.") + + try: + return "".join(string_list) + except TypeError: + raise TypeError("All elements in the list must be strings.") diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py new file mode 100644 index 00000000..bdca22e5 --- /dev/null +++ b/swarms/structs/mixture_of_agents.py @@ -0,0 +1,159 @@ +from swarms.structs.agent import Agent +from swarms.structs.base_swarm import BaseSwarm +from typing import List, Any + +from swarms.structs.conversation import Conversation +from pydantic import BaseModel +from swarms.utils.loguru_logger import logger + + +class AgentRun(BaseModel): + agent_name: str + output: Any + + +class Metadata(BaseModel): + layers: int + agent_runs: List[AgentRun] + final_output: Any + + +class MixtureOfAgents(BaseSwarm): + """ + Represents a mixture of agents in a swarm. + The process is parallel -> sequential -> parallel -> final output agent. + From the paper: https://arxiv.org/pdf/2406.04692 + + Attributes: + agents (List[Agent]): The list of agents in the swarm. + flow (str): The flow of the swarm. + max_loops (int): The maximum number of loops to run. + verbose (bool): Flag indicating whether to print verbose output. + layers (int, optional): The number of layers in the swarm. Defaults to None. + rules (str, optional): The rules for the swarm. Defaults to None. 
+ """ + + def __init__( + self, + name: str = "MixtureOfAgents", + description: str = "A swarm of agents that run in parallel and sequentially.", + agents: List[Agent] = None, + max_loops: int = 1, + verbose: bool = True, + layers: int = None, + rules: str = None, + final_agent: Agent = None, + auto_save: bool = False, + saved_file_name: str = "moe_swarm.json", + ): + self.name = name + self.description = description + self.agents = agents + self.max_loops = max_loops + self.verbose = verbose + self.layers = layers + self.rules = rules + self.final_agent = final_agent + self.auto_save = auto_save + self.saved_file_name = saved_file_name + + # Check the agents + self.agent_check() + self.final_agent_check() + + # Conversation + self.conversation = Conversation( + time_enabled=True, + rules=rules, + ) + + # Initialize the swarm + self.swarm_initialization() + + def agent_check(self): + if not isinstance(self.agents, list): + raise TypeError("Input must be a list of agents.") + for agent in self.agents: + if not isinstance(agent, Agent): + raise TypeError( + "Input must be a list of agents." + "Each agent must be an instance of Agent." 
+ ) + + def final_agent_check(self): + if not isinstance(self.final_agent, Agent): + raise TypeError("Final agent must be an instance of Agent.") + + def swarm_initialization(self): + # Name, description, and logger + logger.info(f"Initializing swarm {self.name}.") + logger.info(f"Description: {self.description}") + logger.info(f"Initializing swarm with {len(self.agents)} agents.") + + def run(self, task: str = None, *args, **kwargs): + try: + # Running the swarm + logger.info(f"Running swarm {self.name}.") + + self.conversation.add("user", task) + + # Conversation history + history = self.conversation.return_history_as_string() + + agent_runs = [] + layer = 0 + while layer < self.layers: + logger.info(f"Running layer {layer} of the swarm.") + # Different Layers + # Run the agents for all agents on the input + responses = [] + for agent in self.agents: + out = agent.run(history, *args, **kwargs) + responses.append((agent.agent_name, out)) + agent_runs.append( + AgentRun(agent_name=agent.agent_name, output=out) + ) + + # Log the agent run + logger.info(f"Agent {agent.agent_name} output: {out}") + + # Add all the responses to the conversation + logger.info("Adding responses to the conversation.") + for agent_name, response in responses: + self.conversation.add(agent_name, response) + + # Update the history + history = self.conversation.return_history_as_string() + + layer += 1 + + logger.info(f"Completed layer {layer} of the swarm.") + + # Run the final output agent on the entire conversation history + logger.info( + "Running the final output agent on the conversation history." 
+ ) + final_output = self.final_agent.run(history, *args, **kwargs) + self.conversation.add( + self.final_agent.agent_name, final_output + ) + + # Create metadata + logger.info("Creating metadata for the swarm.") + metadata = Metadata( + layers=self.layers, + agent_runs=agent_runs, + final_output=final_output, + ) + + # Save metadata to JSON file + logger.info("Saving metadata to JSON file.") + with open(self.saved_file_name, "w") as f: + f.write(metadata.json()) + + return self.conversation.return_history_as_string() + except Exception as e: + logger.error( + f"Error running swarm: {e} try optimizing the swarm inputs or re-iterate on the task." + ) + return None From 320548c6fea33e8c5ac530b4082af4f6b9155464 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:07:06 -0700 Subject: [PATCH 03/13] [CLEANUP] --- playground/weatherman_agent/.env.example | 5 - playground/weatherman_agent/.gitignore | 204 --- playground/weatherman_agent/README.md | 112 -- playground/weatherman_agent/api.py | 119 -- .../weatherman_agent/datasets/examples.csv | 40 - .../datasets/rain_weather_prompts.csv | 26 - .../datasets/weatherman_agent_LLM_prompts.csv | 26 - .../weatherman_agent/docs/llama3_hosted.md | 112 -- .../weatherman_agent/docs/weather_agent.md | 113 -- .../examples/baron_tool_with_swarms_tool.py | 30 - .../examples/llama_3_hosted_swarms.py | 19 - .../weatherman_agent/examples/llama_agent.py | 34 - .../weatherman_agent/examples/tool_schemas.py | 35 - playground/weatherman_agent/pyproject.toml | 55 - playground/weatherman_agent/requirements.txt | 18 - .../weatherman_agent/scripts/Dockerfile | 28 - playground/weatherman_agent/scripts/setup.sh | 0 .../tests/test_baron_tools.py | 56 - .../weatherman_agent/tests/test_llama3.py | 41 - .../tests/tests_weather_agent.py | 161 --- .../weatherman_agent/todo/director_agent.py | 279 ---- .../weatherman_agent/todo/worker_agents.py | 269 ---- playground/weatherman_agent/weather_agent.py | 50 - .../weather_swarm/__init__.py | 0 
.../weatherman_agent/weather_swarm/prompts.py | 152 -- .../weather_swarm/tools/__init__.py | 0 .../weather_swarm/tools/baron_tools_schema.py | 145 -- .../tools/get_geo_coordinates.py | 109 -- .../weather_swarm/tools/tools.py | 1281 ----------------- 29 files changed, 3519 deletions(-) delete mode 100644 playground/weatherman_agent/.env.example delete mode 100644 playground/weatherman_agent/.gitignore delete mode 100644 playground/weatherman_agent/README.md delete mode 100644 playground/weatherman_agent/api.py delete mode 100644 playground/weatherman_agent/datasets/examples.csv delete mode 100644 playground/weatherman_agent/datasets/rain_weather_prompts.csv delete mode 100644 playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv delete mode 100644 playground/weatherman_agent/docs/llama3_hosted.md delete mode 100644 playground/weatherman_agent/docs/weather_agent.md delete mode 100644 playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py delete mode 100644 playground/weatherman_agent/examples/llama_3_hosted_swarms.py delete mode 100644 playground/weatherman_agent/examples/llama_agent.py delete mode 100644 playground/weatherman_agent/examples/tool_schemas.py delete mode 100644 playground/weatherman_agent/pyproject.toml delete mode 100644 playground/weatherman_agent/requirements.txt delete mode 100644 playground/weatherman_agent/scripts/Dockerfile delete mode 100644 playground/weatherman_agent/scripts/setup.sh delete mode 100644 playground/weatherman_agent/tests/test_baron_tools.py delete mode 100644 playground/weatherman_agent/tests/test_llama3.py delete mode 100644 playground/weatherman_agent/tests/tests_weather_agent.py delete mode 100644 playground/weatherman_agent/todo/director_agent.py delete mode 100644 playground/weatherman_agent/todo/worker_agents.py delete mode 100644 playground/weatherman_agent/weather_agent.py delete mode 100644 playground/weatherman_agent/weather_swarm/__init__.py delete mode 100644 
playground/weatherman_agent/weather_swarm/prompts.py delete mode 100644 playground/weatherman_agent/weather_swarm/tools/__init__.py delete mode 100644 playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py delete mode 100644 playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py delete mode 100644 playground/weatherman_agent/weather_swarm/tools/tools.py diff --git a/playground/weatherman_agent/.env.example b/playground/weatherman_agent/.env.example deleted file mode 100644 index 0fa1a6b7..00000000 --- a/playground/weatherman_agent/.env.example +++ /dev/null @@ -1,5 +0,0 @@ -ANTHROPIC_API_KEY="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" -SWARMS_API_KEY="GET YOUR KEY AT https://swarms.world/account" -BARON_API_HOST="http://api.velocityweather.com/v1" -BARON_ACCESS_KEY="Y5lHXZfgce7P" -BARON_ACCESS_KEY_SECRET="rcscpInzyLuweENUjUtFDmqLkK1N0EPeaWQRjy7er1"] \ No newline at end of file diff --git a/playground/weatherman_agent/.gitignore b/playground/weatherman_agent/.gitignore deleted file mode 100644 index 97476ea2..00000000 --- a/playground/weatherman_agent/.gitignore +++ /dev/null @@ -1,204 +0,0 @@ -__pycache__/ -.venv/ - -.env - -image/ -audio/ -video/ -dataframe/ - -static/generated -runs -chroma -Weather Director Agent_state.json -Unit Testing Agent_state.json -Devin_state.json -swarms/__pycache__ -artifacts -transcript_generator.json -venv -.DS_Store -Cargo.lock -.DS_STORE -Cargo.lock -swarms/agents/.DS_Store -artifacts_two -logs -_build -conversation.txt -t1_state.json -stderr_log.txt -t2_state.json -.vscode -.DS_STORE -# Byte-compiled / optimized / DLL files -Transcript Generator_state.json -__pycache__/ -*.py[cod] -*$py.class -.grit -swarm-worker-01_state.json -error.txt -Devin Worker 2_state.json -# C extensions -*.so -.ruff_cache - - -errors.txt - -Autonomous-Agent-XYZ1B_state.json -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ 
-.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py -.DS_Store -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
-#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ -.vscode/settings.json diff --git a/playground/weatherman_agent/README.md b/playground/weatherman_agent/README.md deleted file mode 100644 index 4a932029..00000000 --- a/playground/weatherman_agent/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Baron Weather - -## Overview -Baron Weather is a sophisticated toolset designed to enable real-time querying of weather data using the Baron API. It utilizes a swarm of autonomous agents to handle concurrent data requests, optimizing for efficiency and accuracy in weather data retrieval and analysis. - -## Features -Baron Weather includes the following key features: -- **Real-time Weather Data Access**: Instantly fetch and analyze weather conditions using the Baron API. -- **Autonomous Agents**: A swarm system for handling multiple concurrent API queries efficiently. 
-- **Data Visualization**: Tools for visualizing complex meteorological data for easier interpretation. - - -## Prerequisites -Before you begin, ensure you have met the following requirements: -- Python 3.10 or newer -- git installed on your machine -- Install packages like swarms - -## Installation - -There are 2 methods, git cloning which allows you to modify the codebase or pip install for simple usage: - -### Pip -`pip3 install -U weather-swarm` - -### Cloning the Repository -To get started with Baron Weather, clone the repository to your local machine using: - -```bash -git clone https://github.com/baronservices/weatherman_agent.git -cd weatherman_agent -``` - -### Setting Up the Environment -Create a Python virtual environment to manage dependencies: - -```bash -python -m venv venv -source venv/bin/activate # On Windows use `venv\Scripts\activate` -``` - -### Installing Dependencies -Install the necessary Python packages via pip: - -```bash -pip install -r requirements.txt -``` - -## Usage -To start querying the Baron Weather API using the autonomous agents, run: - -```bash -python main.py -``` - -## API - -```bash -python3 api.py -``` - - -### Llama3 - -```python -from swarms import llama3Hosted - - -# Example usage -llama3 = llama3Hosted( - model="meta-llama/Meta-Llama-3-8B-Instruct", - temperature=0.8, - max_tokens=1000, - system_prompt="You are a helpful assistant.", -) - -completion_generator = llama3.run( - "create an essay on how to bake chicken" -) - -print(completion_generator) - -``` - -# Documentation -- [Llama3Hosted](docs/llama3_hosted.md) - -## Contributing -Contributions to Baron Weather are welcome and appreciated. Here's how you can contribute: - -1. Fork the Project -2. Create your Feature Branch (`git checkout -b feature/YourAmazingFeature`) -3. Commit your Changes (`git commit -m 'Add some YourAmazingFeature'`) -4. Push to the Branch (`git push origin feature/YourAmazingFeature`) -5. 
Open a Pull Request - - -## Tests -To run tests run the following: - -`pytest` - -## Contact -Project Maintainer - [Kye Gomez](mailto:kye@swarms.world) - [GitHub Profile](https://github.com/baronservices) - - -# Todo -- [ ] Add the schemas to the worker agents to output json -- [ ] Implement the parser and the function calling mapping to execute the functions -- [ ] Implement the HiearArchical Swarm and plug in and all the agents -- [ ] Then, implement the API server wrapping the hiearchical swarm -- [ ] Then, Deploy on the server 24/7 \ No newline at end of file diff --git a/playground/weatherman_agent/api.py b/playground/weatherman_agent/api.py deleted file mode 100644 index f872afd4..00000000 --- a/playground/weatherman_agent/api.py +++ /dev/null @@ -1,119 +0,0 @@ -import os -import uuid -from typing import Any, Dict, List - -from dotenv import load_dotenv -from fastapi import FastAPI, HTTPException -from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel -from swarms import Agent, OpenAIChat -from swarms.utils.loguru_logger import logger - -from weather_swarm.prompts import ( - FEW_SHORT_PROMPTS, - GLOSSARY_PROMPTS, - WEATHER_AGENT_SYSTEM_PROMPT, -) -from weather_swarm.tools.tools import ( - point_query, - request_ndfd_basic, - request_ndfd_hourly, -) - -load_dotenv() - -logger.info("Starting the API server..") -app = FastAPI(debug=True) - -# Load the middleware to handle CORS -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - - -class ChatRequest(BaseModel): - model: str - prompt: str - max_tokens: int = 100 - temperature: float = 1.0 - - -class ChatResponse(BaseModel): - id: str - object: str - created: int - model: str - choices: List[Dict[str, Any]] - usage: Dict[str, Any] - - -@app.get("/v1/health") -async def health_check(): - return {"status": "ok"} - - -@app.get("/v1/models") -async def get_models(): - return {"models": ["WeatherMan 
Agent"]} - - -@app.post("/v1/chat/completions", response_model=ChatResponse) -async def chat_completions(request: ChatRequest): - if request.model != "WeatherMan Agent": - raise HTTPException(status_code=400, detail="Model not found") - - # Initialize the WeatherMan Agent - agent = Agent( - agent_name="WeatherMan Agent", - system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, - sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], - llm=OpenAIChat( - openai_api_key=os.getenv("OPENAI_API_KEY"), - max_tokens=request.max_tokens, - temperature=request.temperature, - ), - max_loops=1, - # dynamic_temperature_enabled=True, - # verbose=True, - output_type=str, - metadata_output_type="json", - function_calling_format_type="OpenAI", - function_calling_type="json", - tools=[point_query, request_ndfd_basic, request_ndfd_hourly], - ) - - # Response from the agent - - try: - response = agent.run(request.prompt) - return { - "id": uuid.uuid4(), - "object": "text_completion", - "created": int(os.times().system), - "model": agent.agent_name, - "choices": [{"text": response}], - "usage": { - "prompt_tokens": len(request.prompt.split()), - "completion_tokens": len(response.split()), - "total_tokens": len(request.prompt.split()) - + len(response.split()), - }, - } - except Exception as e: - raise HTTPException(status_code=500, detail=str(e)) - - -# Example of how to run the FastAPI app -def deploy_app(host: str = "0.0.0.0", port: int = 8000): - import uvicorn - - uvicorn.run(app, host=host, port=port) - - -# Run the FastAPI app -if __name__ == "__main__": - deploy_app() diff --git a/playground/weatherman_agent/datasets/examples.csv b/playground/weatherman_agent/datasets/examples.csv deleted file mode 100644 index d694d7ff..00000000 --- a/playground/weatherman_agent/datasets/examples.csv +++ /dev/null @@ -1,40 +0,0 @@ -prompt,goal,required inputs,api example -What is the current temperature?,allow the user to request the current temperature for their location,user's 
location,"request_metar_nearest(""38"", ""-96"")" -Describe the current weather.,have the LLM construct a narrative weather description based on current conditions,user's location,"request_metar_nearest(""38"", ""-96"")" -How much rain fell at my location?,allow the user to determine how much rain has accumulated at their location in the last 24 hours,user's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" -Is it going to be sunny tomorrow?,allow the user to determine cloud coverage for their location ,user's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" -Is rain expected at my location in the next 6 hours? ,allow the user to determine if precip will fall in the coming hours,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -What is the max forecasted temperature today? ,allow the user to determine how hot or cold the air temp will be,user's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" -Will it be windy today? ,allow the user to determine the max wind speed for that day,user's location,"point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)" -,,, -How much rain fell at my location on date/time?,,"user's location, date/time", -What dates did hail fall at my location during x time range? ,allow the user to request a list of dates at which hail fell at their location,"user's location, date range", -Is it good weather to spray fertilizer? ,,, -How will the weather today impact solar panel performance? ,,, -Will my soccer game get rained out this evening? ,"determine if rain will impact my location ""this evening""","user's location, current date","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Is it safe to go hiking today based on the weather? 
,"check for high wind or rain forecast, perhaps extreme heat and cold","hiking location, current date", -What is the liklihood of frost tonight? ,are forecast conditions right for frost,"location, date", -What time will be the hottest part of the day tomorrow? ,determine highest forecast heat index tomorrow,"location, tomorrow's date", -When is it forecasted to rain again at my house? ,"use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. If not, swap to GFS for days 4-14",location, -How will the weather impact my flight today? ,check against conditions commonly associated with flight delays,location/time of departure at airport, -Are there any flood warnings in my area? ,check against current watch/warning map,location, -How will the weather affect road conditions and traffic safety tomorrow morning?,"check forecasted road conditions, perhaps check for heavy precip rate, high accums, snow depth",location/route, -When was the last time it rained at my location? 
,"use historical rainfall, weather inspector?","location, date range", -,,, -,,, -What's the highest temperature in United States right now?,determine the highest current temperature in the US,search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53 - -discard all METARs that do not begin with the letter K -" -What's the lowest temperature in United States right now?,determine the lowest current temperature in the US,search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53 - -discard all METARs that do not begin with the letter K -" -What's the highest temperature in the world right now?,determine the highest current temperature in the world,search all METARs, -What's the lowest temperature in the world right now?,determine the lowest current temperature in the world,search all METARs,https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI= -,,, -,,, -,,, -,,, -,,, -Weather inspector tie in???,,, \ No newline at end of file diff --git a/playground/weatherman_agent/datasets/rain_weather_prompts.csv b/playground/weatherman_agent/datasets/rain_weather_prompts.csv deleted file mode 100644 index 6f9a6276..00000000 --- a/playground/weatherman_agent/datasets/rain_weather_prompts.csv +++ /dev/null @@ -1,26 +0,0 @@ -prompt,goal,required inputs,api example -How much rain fell at my location?,allow the user to determine how much rain has accumulated at their location in the last 24 hours,user's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" -Is rain expected at my location in the next 6 hours?,"allow the user to determine if precip will fall in the coming hours, forecast query",user's 
location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -How much rain fell at my location on date/time?,historical query,"user's location, date/time",https://api.velocityweather.com/v1/cLRlLroVhajP/point/north-american-radar/Mask1-Mercator/2024-05-08T21%3A14%3A43Z.json?lat=35.505400093441324&lon=-87.60498046875&ts=1717294800&sig=_mCs5_XfZKQon55AzSGPI7dtoHY= -Will my soccer game get rained out this evening?,"determine if rain will impact my location ""this evening"", forecast query","user's location, current date","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -When is it forecasted to rain again at my house?,"use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. If not, swap to GFS for days 4-14",user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -When was the last time it rained at my location?,"use historical rainfall, weather inspector?","location, date range", -Is there any chance of rain during my commute today?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Should I bring an umbrella for my walk this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Will it rain for the outdoor concert tonight?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Will it rain during my barbecue this weekend?,forecast query,location of bbq,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Is there a storm expected in my area today?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Will it rain on my drive to work tomorrow morning?,forecast query,user's location + work 
location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Are there any rain showers predicted for this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Should I expect rain during my jog this evening?,forecast query,user's location.....where will they jog? will the LLM prompt?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -How likely is it to rain during my picnic at the park?,forecast query,user's location .... will the LLM prompt for the picnic location?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Is rain expected when I plan to leave for the airport?,forecast query,user's location....will the LLM prompt for the location they'll depart from?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Will the weather be dry for my cycling trip today?,forecast query,"location of cycling trip, starting point","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Is rain in the forecast for my beach outing tomorrow?,forecast query,location of beach,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Will it rain during my son's baseball game tonight?,forecast query,location of baseball game,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Should I prepare for rain on my camping trip this weekend?,forecast query,location of campsite....will the LLM prompt for the campsite location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -What’s the rain forecast for my neighborhood today?,forecast query,neighbourhood location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Is there any rainfall expected while I'm gardening this afternoon?,forecast query,user's 
location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -How heavy is the rain expected to be tonight?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Are there any rainstorms predicted during my road trip?,forecast query,can the LLM prompt for location/route of the road trip? should we state we don't support multi-location prompts? can we pull this off?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Will there be rain showers in my area over the next few days?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" diff --git a/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv b/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv deleted file mode 100644 index 4dfef6c7..00000000 --- a/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv +++ /dev/null @@ -1,26 +0,0 @@ -prompt,goal,required inputs,api example -What is the current temperature?,Allow the user to request the current temperature for their location,User's location,"request_metar_nearest(""38"", ""-96"")" -Describe the current weather.,Have the LLM construct a narrative weather description based on current conditions,User's location,"request_metar_nearest(""38"", ""-96"")" -How much rain fell at my location?,Allow the user to determine how much rain has accumulated at their location in the last 24 hours,User's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" -Is it going to be sunny tomorrow?,Allow the user to determine cloud coverage for their location,User's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" -Is rain expected at my location in the next 6 hours?,Allow the user to determine if precipitation will fall in the coming hours,User's location,"point_query('baron-hires-maxreflectivity-dbz-all', 
'Mask1-Mercator', -86.6, 34.4)" -What is the max forecasted temperature today?,Allow the user to determine how hot or cold the air temp will be,User's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" -Will it be windy today?,Allow the user to determine the max wind speed for that day,User's location,"point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)" -How much rain fell at my location on date/time?,,,User's location, date/time -What dates did hail fall at my location during x time range?,Allow the user to request a list of dates at which hail fell at their location,User's location, date range -Is it good weather to spray fertilizer?,,,, -How will the weather today impact solar panel performance?,,,, -Will my soccer game get rained out this evening?,Determine if rain will impact my location "this evening",User's location, current date,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" -Is it safe to go hiking today based on the weather?,Check for high wind or rain forecast, perhaps extreme heat and cold,Hiking location, current date -What is the likelihood of frost tonight?,Are forecast conditions right for frost,Location, date -What time will be the hottest part of the day tomorrow?,Determine highest forecast heat index tomorrow,Location, tomorrow's date -When is it forecasted to rain again at my house?,Use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. 
If not, swap to GFS for days 4-14,Location, -How will the weather impact my flight today?,Check against conditions commonly associated with flight delays,Location/time of departure at airport, -Are there any flood warnings in my area?,Check against current watch/warning map,Location, -How will the weather affect road conditions and traffic safety tomorrow morning?,Check forecasted road conditions, perhaps check for heavy precip rate, high accums, snow depth,Location/route, -When was the last time it rained at my location?,Use historical rainfall, weather inspector?,Location, date range -What's the highest temperature in United States right now?,Determine the highest current temperature in the US,Search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53" -What's the lowest temperature in United States right now?,Determine the lowest current temperature in the US,Search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53" -What's the highest temperature in the world right now?,Determine the highest current temperature in the world,Search all METARs,https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI= -What's the lowest temperature in the world right now?,Determine the lowest current temperature in the world,Search all METARs,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI=" -Weather inspector tie in???,,,, diff --git a/playground/weatherman_agent/docs/llama3_hosted.md b/playground/weatherman_agent/docs/llama3_hosted.md deleted file mode 100644 index 9fd770f4..00000000 --- a/playground/weatherman_agent/docs/llama3_hosted.md +++ /dev/null @@ -1,112 +0,0 
@@ -# llama3Hosted Documentation - -## Overview - -The `llama3Hosted` class is a high-level interface for interacting with a hosted version of the Llama3 model. This class is designed to simplify the process of generating responses from the Llama3 model by providing an easy-to-use interface for sending requests and receiving responses. The Llama3 model is a state-of-the-art language model developed by Meta, known for its ability to generate human-like text based on the input it receives. - -### Key Features - -- **Model Customization**: Allows the user to specify which version of the Llama3 model to use. -- **Temperature Control**: Adjusts the randomness of the generated responses. -- **Token Limitation**: Sets a limit on the maximum number of tokens in the generated response. -- **System Prompt**: Defines the initial context for the conversation, guiding the model's responses. - -## Purpose - -The `llama3Hosted` class is designed to provide developers with a straightforward way to utilize the capabilities of the Llama3 model without dealing with the complexities of model hosting and API management. It is particularly useful for applications that require natural language understanding and generation, such as chatbots, virtual assistants, and content generation tools. - -## Class Definition - -### llama3Hosted Parameters - -| Parameter | Type | Default | Description | -|----------------|--------|-----------------------------------------|--------------------------------------------------------------| -| `model` | `str` | `"meta-llama/Meta-Llama-3-8B-Instruct"` | The name or path of the Llama3 model to use. | -| `temperature` | `float`| `0.8` | The temperature parameter for generating responses. | -| `max_tokens` | `int` | `4000` | The maximum number of tokens in the generated response. | -| `system_prompt`| `str` | `"You are a helpful assistant."` | The system prompt to use for generating responses. | -| `*args` | | | Variable length argument list. 
| -| `**kwargs` | | | Arbitrary keyword arguments. | - -### Attributes - -| Attribute | Type | Description | -|----------------|--------|--------------------------------------------------------------| -| `model` | `str` | The name or path of the Llama3 model. | -| `temperature` | `float`| The temperature parameter for generating responses. | -| `max_tokens` | `int` | The maximum number of tokens in the generated response. | -| `system_prompt`| `str` | The system prompt for generating responses. | - -## Method: run - -### Parameters - -| Parameter | Type | Description | -|-----------|--------|-----------------------------------| -| `task` | `str` | The user's task or input. | -| `*args` | | Variable length argument list. | -| `**kwargs`| | Arbitrary keyword arguments. | - -### Returns - -| Type | Description | -|------|--------------------------------------------| -| `str`| The generated response from the Llama3 model.| - -### Usage Examples -First install weather_swarm with: - -`$ pip install -U weather-swarm` - - -#### Example 1: Basic Usage - -```python -from weather_swarmn import llama3Hosted - -llama = llama3Hosted() -response = llama.run("Tell me a joke.") -print(response) -``` - -#### Example 2: Custom Model and Parameters - -```python -import requests -import json -from weather_swarmn import llama3Hosted - - -llama = llama3Hosted( - model="custom-llama-model", - temperature=0.5, - max_tokens=2000, - system_prompt="You are a witty assistant." -) -response = llama.run("What's the weather like today?") -print(response) -``` - -#### Example 3: Using Additional Arguments - -```python -from weather_swarmn import llama3Hosted - -llama = llama3Hosted() -response = llama.run("Write a short story.", custom_stop_tokens=[128002, 128003]) -print(response) -``` - -## Additional Information and Tips - -- **Temperature Parameter**: The temperature parameter controls the randomness of the model's output. 
Lower values (close to 0) make the output more deterministic, while higher values (up to 1) make it more random. -- **System Prompt**: Crafting an effective system prompt can significantly impact the quality and relevance of the model's responses. Ensure the prompt aligns well with the intended use case. -- **Error Handling**: Always include error handling when making API requests to ensure your application can gracefully handle any issues that arise. - -## References and Resources - -- [Llama3 Model Documentation](https://github.com/facebookresearch/llama) -- [Requests Library Documentation](https://docs.python-requests.org/en/latest/) -- [JSON Library Documentation](https://docs.python.org/3/library/json.html) - -This documentation provides a comprehensive overview of the `llama3Hosted` class, its parameters, attributes, methods, and usage examples. By following this guide, developers can effectively integrate and utilize the Llama3 model in their applications. \ No newline at end of file diff --git a/playground/weatherman_agent/docs/weather_agent.md b/playground/weatherman_agent/docs/weather_agent.md deleted file mode 100644 index 58bf7ad0..00000000 --- a/playground/weatherman_agent/docs/weather_agent.md +++ /dev/null @@ -1,113 +0,0 @@ -## Weather Agent API Documentation - -### Overview -The Weather Agent API provides endpoints to interact with a weather prediction model, "WeatherMan Agent". This API allows users to get weather-related information through chat completions using the OpenAI GPT model with specific prompts and tools. - -### Base URL -``` -http://localhost:8000 -``` - -### Endpoints - -#### Health Check - -##### `GET /v1/health` -Checks the health status of the API. - -**Response:** -- `200 OK`: Returns a JSON object indicating the status of the API. - ```json - { - "status": "ok" - } - ``` - -#### Get Models - -##### `GET /v1/models` -Retrieves the list of available models. 
- -**Response:** -- `200 OK`: Returns a JSON object with the list of models. - ```json - { - "models": ["WeatherMan Agent"] - } - ``` - -#### Chat Completions - -##### `POST /v1/chat/completions` -Generates weather-related responses based on the provided prompt using the "WeatherMan Agent" model. - -**Request Body:** -- `model` (string): The name of the model to use. Must be "WeatherMan Agent". -- `prompt` (string): The input prompt for the chat completion. -- `max_tokens` (integer, optional): The maximum number of tokens to generate. Default is 100. -- `temperature` (float, optional): The sampling temperature for the model. Default is 1.0. - -**Example Request:** -```json -{ - "model": "WeatherMan Agent", - "prompt": "What will the weather be like tomorrow in New York?", - "max_tokens": 100, - "temperature": 1.0 -} -``` - -**Response:** -- `200 OK`: Returns a JSON object with the completion result. - ```json - { - "id": "unique-id", - "object": "text_completion", - "created": 1234567890, - "model": "WeatherMan Agent", - "choices": [ - { - "text": "The weather tomorrow in New York will be..." - } - ], - "usage": { - "prompt_tokens": 10, - "completion_tokens": 15, - "total_tokens": 25 - } - } - ``` -- `400 Bad Request`: If the model specified is not "WeatherMan Agent". - ```json - { - "detail": "Model not found" - } - ``` -- `500 Internal Server Error`: If there is an error processing the request. - ```json - { - "detail": "Error message" - } - ``` - -### Models -The API supports the following model: -- **WeatherMan Agent**: A specialized agent for providing weather-related information based on the prompt. - -### Usage - -1. **Health Check:** Verify that the API is running by sending a GET request to `/v1/health`. -2. **Get Models:** Retrieve the list of available models by sending a GET request to `/v1/models`. -3. **Chat Completions:** Generate a weather-related response by sending a POST request to `/v1/chat/completions` with the required parameters. 
- -### Error Handling -The API returns appropriate HTTP status codes and error messages for different error scenarios: -- `400 Bad Request` for invalid requests. -- `500 Internal Server Error` for unexpected errors during processing. - -### CORS Configuration -The API allows cross-origin requests from any origin, supporting all methods and headers. - ---- - -For further assistance or issues, please contact the API support team. \ No newline at end of file diff --git a/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py b/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py deleted file mode 100644 index b2ede198..00000000 --- a/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py +++ /dev/null @@ -1,30 +0,0 @@ -from weather_swarm.tools.tools import request_metar_nearest -from swarms import tool - - -@tool( - name="RequestMetarNearest", - description=( - "Requests the nearest METAR (Meteorological Aerodrome Report)" - " data based on the given latitude and longitude." - ), - return_string=False, - return_dict=False, -) -def request_metar_nearest_new(lat: float, lon: float): - """ - Requests the nearest METAR (Meteorological Aerodrome Report) data based on the given latitude and longitude. - - Args: - lat (float): The latitude of the location. - lon (float): The longitude of the location. - - Returns: - The METAR data for the nearest location. 
- """ - return request_metar_nearest(lat, lon) - - -out = request_metar_nearest_new(37.7749, -122.4194) -print(out) -print(type(out)) diff --git a/playground/weatherman_agent/examples/llama_3_hosted_swarms.py b/playground/weatherman_agent/examples/llama_3_hosted_swarms.py deleted file mode 100644 index 78292685..00000000 --- a/playground/weatherman_agent/examples/llama_3_hosted_swarms.py +++ /dev/null @@ -1,19 +0,0 @@ -from swarms import llama3Hosted - - -# Example usage -llama3 = llama3Hosted( - model="meta-llama/Meta-Llama-3-8B-Instruct", - temperature=0.8, - max_tokens=1000, - system_prompt=( - "You're a weather agent for Baron Weather, you specialize in" - " weather analysis" - ), -) - -completion_generator = llama3.run( - "What are the best weather conditions to lay concrete", -) - -print(completion_generator) diff --git a/playground/weatherman_agent/examples/llama_agent.py b/playground/weatherman_agent/examples/llama_agent.py deleted file mode 100644 index 6debdd38..00000000 --- a/playground/weatherman_agent/examples/llama_agent.py +++ /dev/null @@ -1,34 +0,0 @@ -from swarms import Agent -from swarms import llama3Hosted -from weather_swarm.prompts import GLOSSARY_PROMPTS -from weather_swarm.prompts import ( - FEW_SHORT_PROMPTS, - WEATHER_AGENT_SYSTEM_PROMPT, -) - - -# Purpose = To generate weather information for the user and send API requests to the Baron Weather API -agent = Agent( - agent_name="WeatherMan Agent", - system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, - sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], - # sop=list_tool_schemas_json, - llm=llama3Hosted( - max_tokens=2000, - temperature=0.1, - ), - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - interactive=True, -) - -# Run the agent to generate the person's information -generated_data = agent.run( - "Based on the current humidity in Huntsville, how frizzy will my" - " hair get?" 
-) - -# Print the generated data -# print(f"Generated data: {generated_data}") diff --git a/playground/weatherman_agent/examples/tool_schemas.py b/playground/weatherman_agent/examples/tool_schemas.py deleted file mode 100644 index 3cb561c2..00000000 --- a/playground/weatherman_agent/examples/tool_schemas.py +++ /dev/null @@ -1,35 +0,0 @@ -from swarms import get_openai_function_schema_from_func - -from weather_swarm.tools.tools import ( - request_metar_nearest, - point_query, - request_ndfd_basic, - # point_query_region, - request_ndfd_hourly, -) - - -def get_schemas_for_funcs(funcs): - schemas = [] - for func in funcs: - name = str(func.__name__) - description = str(func.__doc__) - schema = get_openai_function_schema_from_func( - func, name=name, description=description - ) - schemas.append(str(schema)) - merged_schemas = "\n".join(schemas) - return merged_schemas - - -funcs = [ - request_metar_nearest, - point_query, - request_ndfd_basic, - # point_query_region, - request_ndfd_hourly, -] - -schemas = get_schemas_for_funcs(funcs) -print(schemas) -print(type(schemas)) diff --git a/playground/weatherman_agent/pyproject.toml b/playground/weatherman_agent/pyproject.toml deleted file mode 100644 index 5d38a19a..00000000 --- a/playground/weatherman_agent/pyproject.toml +++ /dev/null @@ -1,55 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "weather-swarm" -version = "0.0.6" -description = "Weather Swarm - Pytorch" -license = "MIT" -authors = ["Kye Gomez "] -homepage = "https://github.com/baronservices/weatherman_agent" -documentation = "https://github.com/baronservices/weatherman_agent" # Add this if you have documentation. 
-readme = "README.md" # Assuming you have a README.md -repository = "https://github.com/baronservices/weatherman_agent" -keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering"] -classifiers = [ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python :: 3.9" -] - -[tool.poetry.dependencies] -python = "^3.10" -swarms = "*" -pydantic = "2.7.1" - - - -[tool.poetry.group.lint.dependencies] -ruff = "^0.1.6" -types-toml = "^0.10.8.1" -types-redis = "^4.3.21.6" -types-pytz = "^2023.3.0.0" -black = "^23.1.0" -types-chardet = "^5.0.4.6" -mypy-protobuf = "^3.0.0" - - -[tool.autopep8] -max_line_length = 80 -ignore = "E501,W6" # or ["E501", "W6"] -in-place = true -recursive = true -aggressive = 3 - - -[tool.ruff] -line-length = 70 - -[tool.black] -line-length = 70 -target-version = ['py38'] -preview = true diff --git a/playground/weatherman_agent/requirements.txt b/playground/weatherman_agent/requirements.txt deleted file mode 100644 index a26b8b84..00000000 --- a/playground/weatherman_agent/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -swarms -pydantic==2.7.1 -base64==1.0.0 -datetime==4.3 -hashlib==20081119 -hmac==20151222 -shutil==1.7.0 -urllib3==1.26.7 -json5==0.9.6 -codecs==1.0.0 -fastapi -pytest -hydra -loguru -requests -opencv-python -beartype -termcolor diff --git a/playground/weatherman_agent/scripts/Dockerfile b/playground/weatherman_agent/scripts/Dockerfile deleted file mode 100644 index 7213ac11..00000000 --- a/playground/weatherman_agent/scripts/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# Use an official Python runtime as a parent image -FROM python:3.10-slim-buster - -# Set environment varibles -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 - -# Set work directory -WORKDIR /app - -# Install system dependencies -RUN apt-get update && apt-get install -y 
--no-install-recommends \ - gcc \ - default-libmysqlclient-dev \ - && rm -rf /var/lib/apt/lists/* - -# Install Python dependencies -COPY requirements.txt /app/ -RUN pip install --no-cache-dir -r requirements.txt - -# Copy project -COPY . /app/ - -# Expose port -EXPOSE 5000 - -# Run the application: -CMD ["gunicorn", "-w", "4", "-k", "gevent", "api:app"] \ No newline at end of file diff --git a/playground/weatherman_agent/scripts/setup.sh b/playground/weatherman_agent/scripts/setup.sh deleted file mode 100644 index e69de29b..00000000 diff --git a/playground/weatherman_agent/tests/test_baron_tools.py b/playground/weatherman_agent/tests/test_baron_tools.py deleted file mode 100644 index 4b21856a..00000000 --- a/playground/weatherman_agent/tests/test_baron_tools.py +++ /dev/null @@ -1,56 +0,0 @@ -from unittest.mock import patch -from weather_swarm.tools.tools import ( - request_metar_nearest, - point_query, - request_ndfd_basic, -) - - -class TestWeatherFunctions: - @patch("your_module.request_metar_nearest") - def test_request_metar_nearest(self, mock_request_metar_nearest): - mock_request_metar_nearest.return_value = "expected_value" - result = request_metar_nearest("38", "-96") - assert result == "expected_value" - - @patch("your_module.point_query") - def test_point_query_precip_totalaccum(self, mock_point_query): - mock_point_query.return_value = "expected_value" - result = point_query( - "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4 - ) - assert result == "expected_value" - - @patch("your_module.point_query") - def test_point_query_baron_hires_maxreflectivity( - self, mock_point_query - ): - mock_point_query.return_value = "expected_value" - result = point_query( - "baron-hires-maxreflectivity-dbz-all", - "Mask1-Mercator", - -86.6, - 34.4, - ) - assert result == "expected_value" - - @patch("your_module.point_query") - def test_point_query_baron_hires_windspeed( - self, mock_point_query - ): - mock_point_query.return_value = "expected_value" - result 
= point_query( - "baron-hires-windspeed-mph-10meter", - "Standard-Mercator", - -86.6, - 34.4, - ) - assert result == "expected_value" - - @patch("your_module.request_ndfd_basic") - def test_request_ndfd_basic(self, mock_request_ndfd_basic): - mock_request_ndfd_basic.return_value = "expected_value" - result = request_ndfd_basic( - 34.730301, -86.586098, "forecast_time" - ) - assert result == "expected_value" diff --git a/playground/weatherman_agent/tests/test_llama3.py b/playground/weatherman_agent/tests/test_llama3.py deleted file mode 100644 index 2e98c03d..00000000 --- a/playground/weatherman_agent/tests/test_llama3.py +++ /dev/null @@ -1,41 +0,0 @@ -from unittest.mock import Mock, patch -from swarms import llama3Hosted - - -class TestLlama3Hosted: - def setup_method(self): - self.llama = llama3Hosted() - - def test_init(self): - assert ( - self.llama.model == "meta-llama/Meta-Llama-3-8B-Instruct" - ) - assert self.llama.temperature == 0.8 - assert self.llama.max_tokens == 4000 - assert ( - self.llama.system_prompt == "You are a helpful assistant." 
- ) - - @patch("requests.request") - def test_run(self, mock_request): - mock_response = Mock() - expected_result = "Test response" - mock_response.json.return_value = { - "choices": [{"message": {"content": expected_result}}] - } - mock_request.return_value = mock_response - - result = self.llama.run("Test task") - assert result == expected_result - mock_request.assert_called_once_with( - "POST", - "http://34.204.8.31:30001/v1/chat/completions", - headers={"Content-Type": "application/json"}, - data=( - '{"model": "meta-llama/Meta-Llama-3-8B-Instruct",' - ' "messages": [{"role": "system", "content": "You are' - ' a helpful assistant."}, {"role": "user", "content":' - ' "Test task"}], "stop_token_ids": [128009, 128001],' - ' "temperature": 0.8, "max_tokens": 4000}' - ), - ) diff --git a/playground/weatherman_agent/tests/tests_weather_agent.py b/playground/weatherman_agent/tests/tests_weather_agent.py deleted file mode 100644 index 891da6a6..00000000 --- a/playground/weatherman_agent/tests/tests_weather_agent.py +++ /dev/null @@ -1,161 +0,0 @@ -import os -import pytest -from dotenv import load_dotenv -from weather_swarm import Agent -from weather_swarm.prompts import ( - WEATHER_AGENT_SYSTEM_PROMPT, - GLOSSARY_PROMPTS, - FEW_SHORT_PROMPTS, -) -from weather_swarm.tools.tools import ( - point_query, - request_ndfd_basic, - request_ndfd_hourly, -) -from swarms import OpenAIChat -from unittest.mock import Mock, patch - -# Load environment variables for tests -load_dotenv() - - -# Fixtures -@pytest.fixture -def weather_agent(): - return Agent( - agent_name="WeatherMan Agent", - system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, - sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], - llm=OpenAIChat(), - max_loops=1, - dynamic_temperature_enabled=True, - verbose=True, - output_type=str, - tools=[point_query, request_ndfd_basic, request_ndfd_hourly], - docs_folder="datasets", - metadata="json", - function_calling_format_type="OpenAI", - function_calling_type="json", - ) - - -# Test 
Environment Loading -def test_load_dotenv(): - assert ( - "API_KEY" in os.environ - ), "API_KEY not found in environment variables" - assert ( - "API_SECRET" in os.environ - ), "API_SECRET not found in environment variables" - - -# Test Agent Initialization -def test_agent_initialization(weather_agent): - assert weather_agent.agent_name == "WeatherMan Agent" - assert weather_agent.system_prompt == WEATHER_AGENT_SYSTEM_PROMPT - assert weather_agent.llm is not None - assert len(weather_agent.tools) == 3 - assert weather_agent.max_loops == 1 - assert weather_agent.dynamic_temperature_enabled is True - assert weather_agent.verbose is True - assert weather_agent.output_type == str - assert weather_agent.docs_folder == "datasets" - assert weather_agent.metadata == "json" - assert weather_agent.function_calling_format_type == "OpenAI" - assert weather_agent.function_calling_type == "json" - - -# Parameterized Testing for Agent Tools -@pytest.mark.parametrize( - "tool", [point_query, request_ndfd_basic, request_ndfd_hourly] -) -def test_agent_tools(weather_agent, tool): - assert tool in weather_agent.tools - - -# Mocking the Agent Run Method -@patch.object( - Agent, - "run", - return_value="No, there are no chances of rain today in Huntsville.", -) -def test_agent_run(mock_run, weather_agent): - response = weather_agent.run( - "Are there any chances of rain today in Huntsville?" - ) - assert ( - response - == "No, there are no chances of rain today in Huntsville." - ) - mock_run.assert_called_once_with( - "Are there any chances of rain today in Huntsville?" 
- ) - - -# Testing Agent's Response Handling -def test_agent_response_handling(weather_agent): - weather_agent.llm = Mock() - weather_agent.llm.return_value = "Mocked Response" - response = weather_agent.run("What's the weather like?") - assert response == "Mocked Response" - - -# Test for Exception Handling in Agent Run -def test_agent_run_exception_handling(weather_agent): - weather_agent.llm = Mock( - side_effect=Exception("Mocked Exception") - ) - with pytest.raises(Exception, match="Mocked Exception"): - weather_agent.run("Will it rain tomorrow?") - - -# Testing Agent Initialization with Missing Parameters -def test_agent_initialization_missing_params(): - with pytest.raises(TypeError): - Agent(agent_name="WeatherMan Agent") - - -# Mocking Environment Variables -@patch.dict( - os.environ, - {"API_KEY": "mock_api_key", "API_SECRET": "mock_api_secret"}, -) -def test_environment_variables(): - load_dotenv() - assert os.getenv("API_KEY") == "mock_api_key" - assert os.getenv("API_SECRET") == "mock_api_secret" - - -# Testing Tools Functionality (Example: point_query) -def test_point_query(): - response = point_query("test_latitude", "test_longitude") - assert ( - response is not None - ) # Replace with more specific assertions based on actual function behavior - - -# Testing Tools Functionality (Example: request_ndfd_basic) -def test_request_ndfd_basic(): - response = request_ndfd_basic("test_latitude", "test_longitude") - assert ( - response is not None - ) # Replace with more specific assertions based on actual function behavior - - -# Testing Tools Functionality (Example: request_ndfd_hourly) -def test_request_ndfd_hourly(): - response = request_ndfd_hourly("test_latitude", "test_longitude") - assert ( - response is not None - ) # Replace with more specific assertions based on actual function behavior - - -# Grouping and Marking Tests -@pytest.mark.slow -def test_slow_functionality(weather_agent): - response = weather_agent.run("Long running query") - assert 
response is not None # Example placeholder - - -# Test Coverage Report -# Run the following command to generate a coverage report: `pytest --cov=weather_swarm` diff --git a/playground/weatherman_agent/todo/director_agent.py b/playground/weatherman_agent/todo/director_agent.py deleted file mode 100644 index faa5f1fa..00000000 --- a/playground/weatherman_agent/todo/director_agent.py +++ /dev/null @@ -1,279 +0,0 @@ -from swarms import Agent -from swarms import llama3Hosted -from weather_swarm.prompts import GLOSSARY_PROMPTS -from pydantic import BaseModel, Field - - -# Define the schema for the HierarchicalSwarmRequest -# class HierarchicalSwarmRequest(BaseModel): -# agents: Dict[str, Any] = Field( -# ..., -# description=( -# "The name of the agents and their respective tasks to be" -# " executed hierarchically." -# ), -# examples={ -# "Weather Director Agent": { -# "task": ( -# "Are there any chances of rain today in" -# " Huntsville?" -# ) -# } -# }, -# ) - - -class HierarchicalSwarmRequest(BaseModel): - task: str = Field( - ..., - description="The user's query.", - examples={ - "What is the current temperature at my location?": { - "task": "What is the current temperature at my location?" - } - }, - ) - agent_name: str = Field( - ..., - description="The name of the specialized agent.", - examples={ - "Current Temperature Retrieval Agent": "Current Temperature Retrieval Agent" - }, - ) - - -# Define the schema for the HierarchicalSwarmResponse -def DIRECTOR_SYSTEM_PROMPT() -> str: - return """**Prompt:** - As a director master agent, your task is to communicate with the user, understand their weather-related queries, and delegate the appropriate tasks to specialized worker agents. Each worker agent is specialized in retrieving a specific type of weather data. Your role involves selecting the correct agent or a list of agents, giving them the necessary tasks, and compiling their responses to provide a comprehensive answer to the user. 
- - **Goal:** - Efficiently manage and delegate tasks to specialized worker agents to gather the necessary weather data and provide a detailed, accurate response to the user. - - **Process:** - 1. **Receive User Query:** - - Understand the user's question or request regarding weather data. - - 2. **Identify Required Data:** - - Determine the type(s) of weather data needed to answer the user's query. - - 3. **Select Appropriate Agents:** - - Choose the specialized agent(s) capable of retrieving the required data. - - 4. **Delegate Tasks:** - - Assign the relevant task to the selected agent(s) using the appropriate inputs. - - 5. **Compile Responses:** - - Gather and compile the data returned by the worker agents into a cohesive response. - - 6. **Respond to User:** - - Provide a detailed and accurate answer to the user based on the compiled data. - - **Worker Agents and Their Specializations:** - 1. **Current Temperature Retrieval Agent** - - Task: Provide the current temperature based on the user's location. - - Required Inputs: User's location (latitude and longitude). - - API Example: `request_metar_nearest("38", "-96")` - - 2. **Current Weather Description Agent** - - Task: Construct a narrative weather description based on current conditions. - - Required Inputs: User's location (latitude and longitude). - - API Example: `request_metar_nearest("38", "-96")` - - 3. **Rainfall Accumulation Agent** - - Task: Provide the accumulated rainfall at the user's location for the last 24 hours. - - Required Inputs: User's location (latitude and longitude). - - API Example: `point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)` - - 4. **Cloud Coverage Forecast Agent** - - Task: Provide the cloud coverage forecast for the user's location for the next day. - - Required Inputs: User's location (latitude and longitude). - - API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)` - - 5. 
**Precipitation Forecast Agent** - - Task: Provide the precipitation forecast for the user's location for the next 6 hours. - - Required Inputs: User's location (latitude and longitude). - - API Example: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)` - - 6. **Maximum Temperature Forecast Agent** - - Task: Provide the maximum forecasted temperature for the user's location for today. - - Required Inputs: User's location (latitude and longitude). - - API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)` - - 7. **Wind Speed Forecast Agent** - - Task: Provide the maximum wind speed forecast for the user's location for today. - - Required Inputs: User's location (latitude and longitude). - - API Example: `point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)` - - **Example Workflow:** - 1. **User Query:** - - "What is the current temperature and will it rain in the next 6 hours at my location?" - - 2. **Identify Required Data:** - - Current temperature and precipitation forecast. - - 3. **Select Appropriate Agents:** - - Current Temperature Retrieval Agent - - Precipitation Forecast Agent - - 4. **Delegate Tasks:** - - Current Temperature Retrieval Agent: `request_metar_nearest("38", "-96")` - - Precipitation Forecast Agent: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)` - - 5. **Compile Responses:** - - Gather responses from both agents. - - 6. **Respond to User:** - - "The current temperature at your location is X degrees. There is/is not expected to be precipitation in the next 6 hours." - - By following this structured approach, you can efficiently manage user queries and provide accurate, detailed weather information. 
- """ - - -# Define the schema for the HierarchicalSwarmResponse -def DIRECTOR_SCHEMA() -> str: - return """ - - { - "type": "object", - "properties": { - "task_id": { - "type": "string", - "description": "Unique identifier for the task" - }, - "user_query": { - "type": "string", - "description": "The query provided by the user" - }, - "agents": { - "type": "array", - "description": "List of agents to handle the query", - "items": { - "type": "object", - "properties": { - "agent_name": { - "type": "string", - "description": "Name of the specialized agent" - }, - "task": { - "type": "string", - "description": "Task description for the agent" - }, - }, - "required": ["agent_name", "task"] - } - } - }, - "required": ["task_id", "user_query", "agents"] - } - - """ - - -def DIRECTOR_AGENT_CALLING_FEW_SHOT() -> str: - return """ - - { - "task_id": "1", - "user_query": "What is the current temperature at my location?", - "agents": [ - { - "agent_name": "Current Temperature Retrieval Agent", - "task": "Provide the current temperature based on the user's location.", - } - ] - } - - - ########## "What is the current temperature and will it rain in the next 6 hours at my location? 
######### - - { - "task_id": "2", - "user_query": "What is the current temperature and will it rain in the next 6 hours at my location?", - "agents": [ - { - "agent_name": "Current Temperature Retrieval Agent", - "task": "Provide the current temperature based on the user's location.", - }, - { - "agent_name": "Precipitation Forecast Agent", - "task": "Provide the precipitation forecast for the user's location for the next 6 hours.", - } - ] - } - - ########### END OF EXAMPLES ########### - - ############# Example 3: Maximum Temperature and Wind Speed Forecast ######### - { - "task_id": "3", - "user_query": "What is the maximum temperature and wind speed forecast for today at my location?", - "agents": [ - { - "agent_name": "Maximum Temperature Forecast Agent", - "task": "Provide the maximum forecasted temperature for the user's location for today.", - }, - { - "agent_name": "Wind Speed Forecast Agent", - "task": "Provide the maximum wind speed forecast for the user's location for today.", - } - ] - } - - - ############ End of Example 3 ############ - - ############ Example 4: Rainfall Accumulation and Cloud Coverage Forecast ######### - { - "task_id": "4", - "user_query": "How much rain fell at my location in the last 24 hours and what is the cloud coverage forecast for tomorrow?", - "agents": [ - { - "agent_name": "Rainfall Accumulation Agent", - "task": "Provide the accumulated rainfall at the user's location for the last 24 hours.", - }, - { - "agent_name": "Cloud Coverage Forecast Agent", - "task": "Provide the cloud coverage forecast for the user's location for the next day.", - } - ] - } - - ############ End of Example 4 ############ - - """ - - -# [C]reate a new agent -agent = Agent( - agent_name="Weather Director Agent", - system_prompt=DIRECTOR_SYSTEM_PROMPT(), - sop_list=[ - GLOSSARY_PROMPTS, - DIRECTOR_SCHEMA(), - DIRECTOR_AGENT_CALLING_FEW_SHOT(), - ], - # sop=list_tool_schemas_json, - llm=llama3Hosted(max_tokens=1000), - max_loops=1, - autosave=True, - 
dashboard=False, - streaming_on=True, - # interactive=True, - verbose=True, - # Set the output type to the tool schema which is a BaseModel - output_type=str, # or dict, or str - metadata_output_type="json", - # List of schemas that the agent can handle - function_calling_format_type="OpenAI", - function_calling_type="json", # or soon yaml - # return_history=True, -) - -# Run the agent to generate the person's information -generated_data = agent.run( - "Are there any chances of rain today in Huntsville?" -) - -# Print the generated data -print(f"Generated data: {generated_data}") diff --git a/playground/weatherman_agent/todo/worker_agents.py b/playground/weatherman_agent/todo/worker_agents.py deleted file mode 100644 index ed8d090f..00000000 --- a/playground/weatherman_agent/todo/worker_agents.py +++ /dev/null @@ -1,269 +0,0 @@ -from swarms import Agent -from swarms import llama3Hosted -from pydantic import BaseModel, Field -from weather_swarm.tools.tools import ( - request_metar_nearest, - point_query, - request_ndfd_basic, - point_query_region, - request_ndfd_hourly, -) - - -class WeatherRequest(BaseModel): - """ - A class to represent the weather request. - - Attributes - ---------- - query : str - The user's query. - """ - - task: str = Field(..., title="The user's query") - tool: str = Field(None, title="The tool to execute") - - -def current_temperature_retrieval_agent(): - return """ - ### Current Temperature Retrieval Agent - - **Prompt:** - As a specialized weather data agent, your task is to provide the current temperature based on the user's location. Ensure accuracy and up-to-date information. - - **Goal:** - Allow the user to request the current temperature for their location. - - **Required Inputs:** - User's location (latitude and longitude). 
- - **API Example:** - request_metar_nearest("38", "-96") - """ - - -def current_weather_description_agent(): - return """ - ### Current Weather Description Agent - - **Prompt:** - As a specialized weather data agent, your task is to construct a narrative weather description based on the current conditions at the user's location. - - **Goal:** - Have the LLM construct a narrative weather description based on current conditions. - - **Required Inputs:** - User's location (latitude and longitude). - - **API Example:** - request_metar_nearest("38", "-96") - """ - - -def rainfall_accumulation_agent(): - return """ - ### Rainfall Accumulation Agent - - **Prompt:** - As a specialized weather data agent, your task is to provide the accumulated rainfall at the user's location for the last 24 hours. - - **Goal:** - Allow the user to determine how much rain has accumulated at their location in the last 24 hours. - - **Required Inputs:** - User's location (latitude and longitude). - - **API Example:** - point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4) - """ - - -def cloud_coverage_forecast_agent(): - return """ - ### Cloud Coverage Forecast Agent - - **Prompt:** - As a specialized weather data agent, your task is to provide the cloud coverage forecast for the user's location for the next day. - - **Goal:** - Allow the user to determine cloud coverage for their location. - - **Required Inputs:** - User's location (latitude and longitude). - - **API Example:** - request_ndfd_basic(34.730301, -86.586098, forecast_time) - """ - - -def precipitation_forecast_agent(): - return """ - ### Precipitation Forecast Agent - - **Prompt:** - As a specialized weather data agent, your task is to provide the precipitation forecast for the user's location for the next 6 hours. - - **Goal:** - Allow the user to determine if precipitation will fall in the coming hours. - - **Required Inputs:** - User's location (latitude and longitude). 
- - **API Example:** - point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4) - """ - - -def maximum_temperature_forecast_agent(): - return """ - ### Maximum Temperature Forecast Agent - - **Prompt:** - As a specialized weather data agent, your task is to provide the maximum forecasted temperature for the user's location for today. - - **Goal:** - Allow the user to determine how hot or cold the air temperature will be. - - **Required Inputs:** - User's location (latitude and longitude). - - **API Example:** - request_ndfd_basic(34.730301, -86.586098, forecast_time) - """ - - -def wind_speed_forecast_agent(): - return """ - ### Wind Speed Forecast Agent - - **Prompt:** - As a specialized weather data agent, your task is to provide the maximum wind speed forecast for the user's location for today. - - **Goal:** - Allow the user to determine the maximum wind speed for that day. - - **Required Inputs:** - User's location (latitude and longitude). - - **API Example:** - point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4) - """ - - -llm = llama3Hosted( - max_tokens=1000, - temperature=0.5, -) - - -# Define the agents with their specific prompts -temp_tracker = Agent( - agent_name="TempTracker", - system_prompt=current_temperature_retrieval_agent(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[request_metar_nearest], -) - -weather_narrator = Agent( - agent_name="WeatherNarrator", - system_prompt=current_weather_description_agent(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[request_metar_nearest], -) - -rain_gauge = Agent( - agent_name="RainGauge", - system_prompt=rainfall_accumulation_agent(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[point_query], -) - 
-cloud_predictor = Agent( - agent_name="CloudPredictor", - system_prompt=cloud_coverage_forecast_agent(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[request_ndfd_basic], -) - -rain_forecaster = Agent( - agent_name="RainForecaster", - system_prompt=precipitation_forecast_agent(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[point_query_region], -) - -temp_forecaster = Agent( - agent_name="TempForecaster", - system_prompt=maximum_temperature_forecast_agent(), - llm=llm, - max_loops=1, - verbose=True, - output_type=dict, - autosave=True, - dashboard=False, - streaming_on=True, - stopping_token="", - tools=[request_ndfd_hourly], -) - -wind_watcher = Agent( - agent_name="WindWatcher", - system_prompt=wind_speed_forecast_agent(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[point_query_region], -) - -# Create a list -agents = [ - temp_tracker, - weather_narrator, - rain_gauge, - cloud_predictor, - rain_forecaster, - temp_forecaster, - wind_watcher, -] - -# # Create a hierarchical swarm -# swarm = HiearchicalSwarm( -# name = "WeatherSwarm", -# description="A swarm of weather agents", -# agents=agents, -# director = -# ) diff --git a/playground/weatherman_agent/weather_agent.py b/playground/weatherman_agent/weather_agent.py deleted file mode 100644 index 998b8922..00000000 --- a/playground/weatherman_agent/weather_agent.py +++ /dev/null @@ -1,50 +0,0 @@ -from dotenv import load_dotenv -from swarms import Agent, OpenAIChat - -from weather_swarm.prompts import ( - FEW_SHORT_PROMPTS, - GLOSSARY_PROMPTS, - WEATHER_AGENT_SYSTEM_PROMPT, -) -from weather_swarm.tools.tools import ( - point_query, - request_ndfd_basic, - request_ndfd_hourly, -) - -# Load the environment variables -load_dotenv() - - -# Purpose = To generate weather 
information for the user and send API requests to the Baron Weather API -agent = Agent( - agent_name="WeatherMan Agent", - system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, - sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], - # sop=list_tool_schemas_json, - llm=OpenAIChat(), - max_loops=1, - # interactive=True, - dynamic_temperature_enabled=True, - verbose=True, - # Set the output type to the tool schema which is a BaseMode - output_type=str, # or dict, or str - tools=[ - # request_metar_nearest, - point_query, - request_ndfd_basic, - # point_query_region, - request_ndfd_hourly, - ], - docs_folder="datasets", # Add every document in the datasets folder - metadata="json", - function_calling_format_type="OpenAI", - function_calling_type="json", -) - -# Run the agent to generate the person's information -# Run the agent to generate the person's information -output = agent.run("Are there any chances of rain today in Huntsville?") -# # Write the output to a new file -# with open('output.txt', 'w') as f: -# f.write(str(output)) \ No newline at end of file diff --git a/playground/weatherman_agent/weather_swarm/__init__.py b/playground/weatherman_agent/weather_swarm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/playground/weatherman_agent/weather_swarm/prompts.py b/playground/weatherman_agent/weather_swarm/prompts.py deleted file mode 100644 index 83c32499..00000000 --- a/playground/weatherman_agent/weather_swarm/prompts.py +++ /dev/null @@ -1,152 +0,0 @@ -GLOSSARY_PROMPTS = """ - -Glossary - -API Terminology -Access_key -A private access key or shared access key (not a secret and not an Application Key) used to access the Baron Weather API. View your access keys on your account page. - -Application Key -Users’ personal and confidential key from which access keys are derived. The application key allows management of access keys. View your application key on your account page. 
- -Configuration_code -Configuration codes are most often used to differentiate between EPSG:3857 (Mercator) and EPSG:4326 (Geodetic) projections. In the Baron Weather API we add a descriptor to the beginning to indicate any additional parameters to the projection. The default descriptor is ‘Standard’ and will be the primary configuration used, but some data products may offer alternative descriptor to differentiate formatting options. - -Coordinated Universal Time (UTC) -This standard organizes the data so the largest temporal term (the year) appears first in the data string and progresses to the smallest term (the second), like so 2012-12-31TI8:51:23Z. - -Format -The language format for API responses. In the Baron Weather API, responses for text products can be in JSON or JSONP format, and graphical formats are always in png format. - -ISO8601 -The primary time standard by which the world regulates clocks and time. - -Max-age -It's an optional parameter for the metar, buoy, and cwop "nearest" api which allows developers to query a lat/lon and only get back data is more recent than the prescribed date and time. - -Metadata_timestamp -The ISO 8601 UTC date/time for the data found in the returned metadata "time" parameter(s). - -Metadata_valid_time -The ISO 8601 UTC date/time for the data found in the returned metadata "valid_times" list. This is required for forecast products (those that provide a valid_times list in the metadata), but unnecessary for non-forecast products. - -Pages -The page parameter was put in place to minimize the amount of information returned in the response. Text products that support the page parameter return the current page number and the total number of pages when you make a request. Many text products provide thousands of lines of data, which can be overwhelming when users are looking for a specific piece of information for a specific time frame. 
For example, a developers looking for the current weather conditions at all METAR stations will not need to have thousands of lines of text returned. Instead, we limit them to a maximum number of stations per page, then if users want the full set, they have to ask explicitly for page 2, page 3, etc. in the request URL. - -Product Code -The code to include in the API URL request that is specific to each weather product. - -Reference Time -The time the forecast model begins. In the product-instances metadata, this is called "time". - -Timestamp -The timestamp value included with the request and used to create the signature. Represented as ‘ts’ in request and always in UTC format. - -Timestep -In general, a single point in time for which the product is valid, also called "valid_times". However for accumulation products, the timesteps represent the end of a measured time interval for which total accumulated precipitation is forecast. A list of timesteps or "valid_times" are provided In the product-instances metadata. - -Timestep Interval -The interval between timesteps. - -Valid_times -The list of UTC-formatted timesteps for a forecast product when the Product Instances API is run. - -X -The x-coordinate of the requested tile. This value represents the horizontal index of the tile, assuming an origin of the lower left corner of the tile grid (0,0). These coordinates correspond to the Tile Map Service Specification. - -Y -The y-coordinate of the requested tile. This value represents the vertical index of the tile, assuming an origin of the lower left corner of the tile grid (0,0). These coordinates correspond to the Tile Map Service Specification. - -Z -The z-coordinate of the requested tile. This value represents the zoom level (depth) of the tile. A value of 0 shows the entire world using the minimum number amount of tiles (1 for Mercator, 2 for Geodetic). The maximum available depth may vary by product. 
These coordinates correspond to the Tile Map Service Specification. - - - - -Meteorological Terminology -dBZ -Stands for decibels relative to Z. It is a meteorological measure of equivalent reflectivity (Z) of a radar signal reflected off a remote object. - -Dew Point -The temperature below which the water vapor in a volume of humid air at a constant barometric pressure will condense into liquid water. - -Heat Index -An index that combines air temperature and relative humidity in an attempt to determine the human-perceived equivalent temperature — how hot it feels. - -Infrared (IR) -In relation to satellite imagery, infrared imagery is produced by satellite analysis of infrared wavelengths. This analysis indicates the temperature of air masses, making it possible to identify cloud cover day or night. - -kft -Stands for thousands of feet. - -Relative Humidity -The ratio of the partial pressure of water vapor in an air-water mixture to the saturated vapor pressure of water at a given temperature. - -Valid Time Event Code (VTEC) -Format in which alerting information is pulled from the National Weather Service. - -Visible Satellite (VIS) -Visible satellite imagery is a snapshot of cloud cover from space. Consequently it is only usable during daylights hours. It is the easiest weather data product for laypeople to understand. - -Warnings -The NWS issues a warning when a hazardous weather or hydrologic event is occurring, is imminent, or has a very high probability of occurring. Often warnings are not issued until conditions have been visually verified. A warning is used for conditions posing a threat to life or property. - -Watches -The NWS issues a watch when the risk of a hazardous weather or hydrologic event has increased significantly, but its occurrence, location, and/or timing is still uncertain. It is intended to provide enough lead time so that those who need to set their plans in motion can do so. 
- -Water Vapor Satellite -Water vapor imagery is a satellite product which measures the amount of moisture in the atmosphere above 10,000 feet. Bright white areas indicate abundant moisture, which may be converted into clouds or precipitation. Darker areas indicate the presence of drier air. In addition to measuring moisture, water vapor imagery is useful in detecting large scale weather patterns, such as jet streams. - -Wave Dominant Period -The period in seconds between successive waves. - -Wave Height -The maximum reported or forecasted wave height. - -Wind Chill -The perceived decrease in air temperature felt by the body on exposed skin due to the flow of cold air. Wind chill temperature is defined only for temperatures at or below 10 °C (50 °F) and wind speeds above 4.8 kilometers per hour (3.0 mph). - -Wind Gust -A sudden, brief increase in speed of wind. According to US weather observing practice, gusts are reported when the peak wind speed reaches at least 16 knots and the variation in wind speed between the peaks and lulls is at least 9 knots. The duration of a gust is usually less than 20 seconds. - -""" - -WEATHER_AGENT_SYSTEM_PROMPT = """ - -You navigate through tasks efficiently. Whether you're learning something new or need assistance with daily tasks, I can provide information, suggestions, and step-by-step guidance. - -#### How I Can Help: -- **Information Retrieval:** I can fetch and summarize information on a wide range of topics. -- **Problem Solving:** I offer solutions and strategies to address specific challenges. -- **Learning Support:** I assist in understanding new concepts and procedures. - -#### Example: Using the Baron Weather API - -Let's look at how you can use the Baron Weather API to retrieve weather data, which involves making authenticated HTTP requests. - -1. **Understand Your Needs**: Identify what specific weather data you need, such as current conditions or a forecast. -2. 
**Gather API Details**: Know your API key, the endpoints available, and the data format (JSON). -3. **Authentication**: Learn how to authenticate your requests using your API key and additional security measures as required (like generating signatures). -4. **Craft the Request**: Construct the correct HTTP request to fetch the data you need. -5. **Parse the Response**: After making the request, interpret the JSON response to extract and utilize the weather data. - -Through each step, I can provide explanations, code snippets, and troubleshooting tips to ensure you successfully achieve your goal. - -### Conclusion - -With these steps, you'll be better prepared to use tools like APIs effectively and get the most out of our interactions. If you have questions or need further assistance, feel free to ask! - ---- - -""" - - -FEW_SHORT_PROMPTS = """ -What is the current temperature? allow the user to request the current temperature for their location user's location request_metar_nearest("38", "-96") -Describe the current weather. have the LLM construct a narrative weather description based on current conditions user's location request_metar_nearest("38", "-96") -How much rain fell at my location? allow the user to determine how much rain has accumulated at their location in the last 24 hours user's location point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4) -Is it going to be sunny tomorrow? allow the user to determine cloud coverage for their location user's location request_ndfd_basic(34.730301, -86.586098, forecast_time) -Is rain expected at my location in the next 6 hours? allow the user to determine if precip will fall in the coming hours user's location point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4) -What is the max forecasted temperature today? allow the user to determine how hot or cold the air temp will be user's location request_ndfd_basic(34.730301, -86.586098, forecast_time) -Will it be windy today? 
allow the user to determine the max wind speed for that day user's location point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4) -""" diff --git a/playground/weatherman_agent/weather_swarm/tools/__init__.py b/playground/weatherman_agent/weather_swarm/tools/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py b/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py deleted file mode 100644 index fba0361b..00000000 --- a/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py +++ /dev/null @@ -1,145 +0,0 @@ -from pydantic import BaseModel, Field - - -class RequestMetarNearest(BaseModel): - latitude: str = Field( - ..., - description=( - "The latitude of the location for which the nearest METAR" - " station is requested." - ), - ) - longitude: str = Field( - ..., - description=( - "The longitude of the location for which the nearest" - " METAR station is requested." - ), - ) - - -class PointQueryPrecipTotalAccum24Hr(BaseModel): - layer: str = Field( - ..., - description=( - "The layer of the precipitation total accumulation in the" - " last 24 hours." - ), - ) - projection: str = Field( - ..., - description=( - "The projection of the location for which the" - " precipitation total accumulation is requested." - ), - ) - longitude: float = Field( - ..., - description=( - "The longitude of the location for which the" - " precipitation total accumulation is requested." - ), - ) - latitude: float = Field( - ..., - description=( - "The latitude of the location for which the precipitation" - " total accumulation is requested." - ), - ) - - -class RequestNDFDBasic(BaseModel): - latitude: float = Field( - ..., - description=( - "The latitude of the location for which the NDFD basic" - " forecast is requested." 
- ), - ) - longitude: float = Field( - ..., - description=( - "The longitude of the location for which the NDFD basic" - " forecast is requested." - ), - ) - forecast_time: str = Field( - ..., - description=( - "The forecast time for which the NDFD basic forecast is" - " requested." - ), - ) - - -class PointQueryBaronHiresMaxReflectivityDbzAll(BaseModel): - layer: str = Field( - ..., - description=( - "The layer of the maximum reflectivity in dBZ for all" - " heights." - ), - ) - projection: str = Field( - ..., - description=( - "The projection of the location for which the maximum" - " reflectivity is requested." - ), - ) - longitude: float = Field( - ..., - description=( - "The longitude of the location for which the maximum" - " reflectivity is requested." - ), - ) - latitude: float = Field( - ..., - description=( - "The latitude of the location for which the maximum" - " reflectivity is requested." - ), - ) - - -class PointQueryBaronHiresWindSpeedMph10Meter(BaseModel): - layer: str = Field( - ..., - description=( - "The layer of the wind speed in mph at 10 meters above" - " ground level." - ), - ) - projection: str = Field( - ..., - description=( - "The projection of the location for which the wind speed" - " is requested." - ), - ) - longitude: float = Field( - ..., - description=( - "The longitude of the location for which the wind speed" - " is requested." - ), - ) - latitude: float = Field( - ..., - description=( - "The latitude of the location for which the wind speed is" - " requested." 
- ), - ) - - -def _remove_a_key(d: dict, remove_key: str) -> None: - """Remove a key from a dictionary recursively""" - if isinstance(d, dict): - for key in list(d.keys()): - if key == remove_key and "type" in d.keys(): - del d[key] - else: - _remove_a_key(d[key], remove_key) diff --git a/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py b/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py deleted file mode 100644 index a5d5f1ba..00000000 --- a/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py +++ /dev/null @@ -1,109 +0,0 @@ -import requests -from typing import List, Dict, Any - - -def fetch_geocode_by_city( - api_key: str, city: str, timestamp: int, signature: str -) -> List[Dict[str, Any]]: - """ - Fetch geocode data by city name. - - Args: - api_key (str): The API key for authentication. - city (str): The name of the city (e.g., "Austin, Tx"). - timestamp (int): The timestamp for the request. - signature (str): The signature for the request. - - Returns: - List[Dict[str, Any]]: Geocode data for the specified city. - - Raises: - Exception: If the request fails or the response is invalid. - """ - url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/city.json" - params = {"name": city, "ts": timestamp, "sig": signature} - try: - response = requests.get(url, params=params) - response.raise_for_status() - data = response.json() - return data.get("geocode", {}).get("data", []) - except requests.RequestException as e: - raise Exception(f"Failed to fetch geocode data by city: {e}") - except ValueError: - raise Exception("Invalid response format.") - - -def fetch_geocode_by_address( - api_key: str, address: str, timestamp: int, signature: str -) -> List[Dict[str, Any]]: - """ - Fetch geocode data by address. - - Args: - api_key (str): The API key for authentication. - address (str): The address (e.g., "3305 Northland Dr, Austin, Tx"). - timestamp (int): The timestamp for the request. 
- signature (str): The signature for the request. - - Returns: - List[Dict[str, Any]]: Geocode data for the specified address. - - Raises: - Exception: If the request fails or the response is invalid. - """ - url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/address.json" - params = {"location": address, "ts": timestamp, "sig": signature} - try: - response = requests.get(url, params=params) - response.raise_for_status() - data = response.json() - return data.get("geocode", {}).get("data", []) - except requests.RequestException as e: - raise Exception( - f"Failed to fetch geocode data by address: {e}" - ) - except ValueError: - raise Exception("Invalid response format.") - - -def fetch_geocode_by_zip( - api_key: str, - zip_code: str, - us: int, - timestamp: int, - signature: str, -) -> List[Dict[str, Any]]: - """ - Fetch geocode data by zip code. - - Args: - api_key (str): The API key for authentication. - zip_code (str): The zip code (e.g., "13060"). - us (int): Indicator for US zip code (1 for US, 0 for other). - timestamp (int): The timestamp for the request. - signature (str): The signature for the request. - - Returns: - List[Dict[str, Any]]: Geocode data for the specified zip code. - - Raises: - Exception: If the request fails or the response is invalid. 
- """ - url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/zip.json" - params = { - "zip": zip_code, - "us": us, - "ts": timestamp, - "sig": signature, - } - try: - response = requests.get(url, params=params) - response.raise_for_status() - data = response.json() - return data.get("geocode", {}).get("data", []) - except requests.RequestException as e: - raise Exception( - f"Failed to fetch geocode data by zip code: {e}" - ) - except ValueError: - raise Exception("Invalid response format.") diff --git a/playground/weatherman_agent/weather_swarm/tools/tools.py b/playground/weatherman_agent/weather_swarm/tools/tools.py deleted file mode 100644 index 68b20f44..00000000 --- a/playground/weatherman_agent/weather_swarm/tools/tools.py +++ /dev/null @@ -1,1281 +0,0 @@ -# coding: utf-8 - -import base64 -import hashlib -import hmac -import shutil -import time -from urllib.request import urlopen -from urllib.request import Request -from urllib.error import URLError -import os -import json -import codecs -from dotenv import load_dotenv -import datetime - -from typeguard import typechecked -from typing import Union - -load_dotenv() - -latin1 = codecs.lookup("latin-1") - -host = os.environ.get( - "BARON_API_HOST", "http://api.velocityweather.com/v1" -) -access_key = os.environ.get("BARON_ACCESS_KEY", "Y5lHXZfgce7P") -access_key_secret = os.environ.get( - "BARON_ACCESS_KEY_SECRET", - "rcscpInzyLuweENUjUtFDmqLkK1N0EPeaWQRjy7er1", -) - - -@typechecked -def a2w(a: bytes) -> str: - """ - Decodes a byte string using Latin-1 encoding and returns the first character of the decoded string. - - Args: - a (bytes): The byte string to be decoded. - - Returns: - str: The first character of the decoded string. - """ - return latin1.decode(a)[0] - - -@typechecked -def sig(key: str, secret: str) -> str: - """ - Generates a signed string using HMAC-SHA1 and base64 encoding. - - Args: - key (str): The key used for signing. - secret (str): The secret used for signing. 
- - Returns: - str: The signed string in the format "sig={signature}&ts={timestamp}". - """ - - ts = "{:.0f}".format(time.time()) - to_sign = key + ":" + ts - hashval = hmac.new( - secret.encode("utf-8"), to_sign.encode("utf-8"), hashlib.sha1 - ) - sig = a2w( - base64.urlsafe_b64encode(hashval.digest()).replace( - b"=", b"%3D" - ) - ) - return "sig={}&ts={}".format(sig, ts) - - -@typechecked -def sign_request(url: str, key: str, secret: str) -> str: - """ - Returns a signed URL by appending the signature and timestamp. - - Args: - url (str): The URL to be signed. - key (str): The key used for signing. - secret (str): The secret used for signing. - - Returns: - str: The signed URL with the signature and timestamp appended as query parameters. - """ - - """Returns signed url""" - - signature = sig(key, secret) - q = "?" if url.find("?") == -1 else "&" - url += "{}{}".format(q, signature) - return url - - -########## [START] API REQUESTS ########## -@typechecked -def request_pointquery_nws_watches_warning_all() -> str: - """ - Constructs a URL for querying all NWS watches and warnings for a specific point and signs the request. - - Returns: - str: The signed URL for the point query. - """ - - uri = "/reports/alert/all-poly/point.json?lat=29.70&lon=-80.41" - url = "%s/%s%s" % (host, access_key, uri) - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_lightning_count() -> str: - """ - Constructs a URL for querying the count of lightning strikes in a specified region and signs the request. - - Returns: - str: The signed URL for the lightning count query. - """ - - uri = "/reports/lightning/count/region.json?w_lon=-160&e_lon=0&n_lat=-2&s_lat=-70" - url = "%s/%s%s" % (host, access_key, uri) - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_storm_vector(sitecode: str) -> str: - """ - Constructs a URL for querying the storm vector for a specific site and signs the request. 
- - Args: - sitecode (str): The code of the site for which the storm vector is being queried. - - Returns: - str: The signed URL for the storm vector query. - """ - - uri = "/reports/stormvector/station/%s.json" % (sitecode) - url = "%s/%s%s" % (host, access_key, uri) - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_geocodeip() -> str: - """ - Constructs a URL for querying the geocode information of an IP address and signs the request. - - Returns: - str: The signed URL for the geocode IP query. - """ - - uri = "/reports/geocode/ipaddress.json" - url = "%s/%s%s" % (host, access_key, uri) - url = sign_request(url, access_key, access_key_secret) - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_forecast(lat: float, lon: float) -> dict: - """ - Constructs a URL for querying a 7-day point forecast for a specific latitude and longitude, signs the request, and retrieves the forecast data. - - Args: - lat (float): The latitude for the forecast query. - lon (float): The longitude for the forecast query. - - Returns: - dict: The forecast data for the specified point if the request is successful, otherwise an empty dictionary. - """ - uri = "/reports/pointforecast/basic.json?days=7&lat={}&lon={}".format(lat, lon) - url = "%s/%s%s" % (host, access_key, uri) - url = sign_request(url, access_key, access_key_secret) - - try: - response = urlopen(url) - except URLError as e: - print(e) - return {} - except ValueError as e: - print(e) - return {} - - assert response.code == 200 - data = json.loads(response.read()) - - forecast_data = data.get("pointforecast_basic", {}).get("data", {}) - if isinstance(forecast_data, dict): - return forecast_data - else: - return {"forecast_data": forecast_data} - - -@typechecked -def request_metar_northamerica() -> None: - """ - Constructs a URL for querying METAR data for North America, signs the request, and retrieves the data. 
- Processes the METAR data and associated forecasts, then saves the data to a JSON file. - - Returns: - None - """ - - uri = "/reports/metar/region.json?n_lat=51.618017&s_lat=23.241346&w_lon=-129.375000&e_lon=-60.644531" - url = "%s/%s%s" % (host, access_key, uri) - url = sign_request(url, access_key, access_key_secret) - - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - - assert response.code == 200 - data = json.loads(response.read()) - - metars = {} - pages = data["metars"]["meta"]["pages"] - - print("processing {} pages of METAR data".format(pages)) - - for i in range(1, pages + 1): - print("processing page {}".format(i)) - page_url = url + "&page={}".format(i) - try: - response = urlopen(page_url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - - assert response.code == 200 - data = json.loads(response.read()) - for metar in data["metars"]["data"]: - siteid = metar["station"]["id"] - print("processing site {}".format(siteid)) - forecast = request_forecast( - metar["station"]["coordinates"][1], - metar["station"]["coordinates"][0], - ) - - metars[siteid] = {"metar": metar, "forecast": forecast} - - with open("metar.json", "w") as metar_jsonfile: - json.dump(metars, metar_jsonfile, indent=4, sort_keys=True) - - - -@typechecked -def request_metar_nearest(lat: str, lon: str): - """ - Requests the nearest METAR (Meteorological Aerodrome Report) data based on the given latitude and longitude. - - Args: - lat (str): The latitude of the location. - lon (str): The longitude of the location. - - Returns: - str: The signed request URL for retrieving the METAR data. 
- """ - uri = ( - "/reports/metar/nearest.json?lat=%s&lon=%s&within_radius=500&max_age=75" - % ( - lat, - lon, - ) - ) - url = "%s/%s%s" % (host, access_key, uri) - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_metar(station_id: str) -> str: - """ - Constructs a URL for querying METAR data for a specific station and signs the request. - - Args: - station_id (str): The ID of the station for which the METAR data is being queried. - - Returns: - str: The signed URL for the METAR query. - """ - - uri = "/reports/metar/station/%s.json" % station_id - url = "%s/%s%s" % (host, access_key, uri) - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_ndfd_hourly(lat: float, lon: float, utc_datetime: datetime.datetime) -> str: - """ - Requests NDFD hourly data for a specific latitude, longitude, and UTC datetime. - - Args: - lat (float): The latitude of the location. - lon (float): The longitude of the location. - utc_datetime (datetime.datetime): The UTC datetime for the request. - - Returns: - str: The signed URL for the request. - """ - datetime_str = ( - utc_datetime.replace(microsecond=0).isoformat() + "Z" - ) - uri = f"/reports/ndfd/hourly.json?lat={lat}&lon={lon}&utc={datetime_str}" - url = f"{host}/{access_key}{uri}" - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_ndfd_basic(lat: float, lon: float, utc_datetime: datetime.datetime) -> str: - """ - Requests NDFD basic data for a specific latitude, longitude, and UTC datetime. - - Args: - lat (float): The latitude of the location. - lon (float): The longitude of the location. - utc_datetime (datetime.datetime): The UTC datetime for the request. - - Returns: - str: The signed URL for the request. 
- """ - - datetime_str = ( - utc_datetime.replace(microsecond=0).isoformat() + "Z" - ) - uri = f"/reports/ndfd/basic.json?lat={lat}&lon={lon}&utc={datetime_str}&days=7" - url = f"{host}/{access_key}{uri}" - return sign_request(url, access_key, access_key_secret) - - -@typechecked -def request_tile(product: str, product_config: str, z: int, x: int, y: int) -> None: - """ - Requests a tile for a specific product and configuration, retrieves the data, and saves it as a PNG file. - - Args: - product (str): The product name. - product_config (str): The product configuration. - z (int): The zoom level. - x (int): The tile's x coordinate. - y (int): The tile's y coordinate. - - Returns: - None - """ - - url = "%s/%s/meta/tiles/product-instances/%s/%s" % ( - host, - access_key, - product, - product_config, - ) - url = sign_request(url, access_key, access_key_secret) - - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - data = json.loads(response.read()) - - # Select the most recent product instance for this example. - product_instance = data[0] - - url = "%s/%s/tms/1.0.0/%s+%s+%s/%d/%d/%d.png" % ( - host, - access_key, - product, - product_config, - product_instance["time"], - z, - x, - y, - ) - - try: - # If it's a forecast product, it will have valid_times. The latest one is used for this example. 
- url += "?valid_time={}".format( - product_instance["valid_times"][0] - ) - except KeyError: - pass - - url = sign_request(url, access_key, access_key_secret) - print(url) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - print("headers:") - print( - json.dumps( - response.headers._headers, indent=4, sort_keys=True - ) - ) - - content = response.read() - filename = "./tms_img_{}_{}.png".format(product, product_config) - print( - "Read {} bytes, saving as {}".format(len(content), filename) - ) - with open(filename, "wb") as f: - f.write(content) - - -@typechecked -def point_query(product: str, product_config: str, lon: float, lat: float) -> None: - """ - Queries the most recent 'time' and, if applicable, 'valid_time' for a given product and product configuration at a specified longitude and latitude point. - - Args: - product (str): The product name. - product_config (str): The product configuration. - lon (float): The longitude of the location. - lat (float): The latitude of the location. - - Returns: - None - """ - # Get the list of product instances. - url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format( - host=host, - key=access_key, - product=product, - product_config=product_config, - ) - url = sign_request(url, access_key, access_key_secret) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - data = json.loads(response.read()) - - # Select the most recent product instance for this example. - product_instance = data[0] - - # Query our lon, lat point. 
- url = "{host}/{key}/point/{product}/{product_config}/{product_instance}.{file_type}?lon={lon}&lat={lat}".format( - host=host, - key=access_key, - product=product, - product_config=product_config, - product_instance=product_instance["time"], - file_type="json", - lon=lon, - lat=lat, - ) - - try: - if product_instance["valid_times"][0]: - # If it's a forecast product, it will have valid_times. Display them all - url += "&valid_time=*" - - # If it's a forecast product, it will have valid_times. The latest one is used for this example. - # url += '&valid_time={}'.format(product_instance['valid_times'][0]) - - except KeyError: - pass - - url = sign_request(url, access_key, access_key_secret) - print(url) - - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - content = response.read() - charset = response.headers.get_param("charset") - if charset: - content = content.decode(charset) - content = json.loads(content) - - print("headers:") - print( - json.dumps( - response.headers._headers, indent=4, sort_keys=True - ) - ) - print("content:") - print( - json.dumps( - content, indent=4, sort_keys=True, ensure_ascii=False - ) - ) - - -@typechecked -def point_query_multi(product: str, product_config: str, points: 'list[tuple[float, float]]') -> None: - """ - For the given product and product_config, queries the most recent 'time' - (and most recent 'valid_time' if it's a forecast product) for a list of points. - - Args: - product (str): The product name. - product_config (str): The product configuration. - points (list[tuple[float, float]]): A list of tuples, each containing the longitude and latitude of a point. - - Returns: - None - """ - - """ - For the given product and product_config, queries the most recent 'time' - (and most recent 'valid_time' if it's a forecast product). - """ - - # Get the list of product instances. 
- url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format( - host=host, - key=access_key, - product=product, - product_config=product_config, - ) - url = sign_request(url, access_key, access_key_secret) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - data = json.loads(response.read()) - - # Select the most recent product instance for this example. - product_instance = data[0] - - def format_point(_p, _decimals=3): - return ",".join(str(round(_, _decimals)) for _ in _p) - - # Query our list of lon, lat points - url = "{host}/{key}/point/multi/{product}/{product_config}/{product_instance}.{file_type}?points={points}".format( - host=host, - key=access_key, - product=product, - product_config=product_config, - product_instance=product_instance["time"], - file_type="json", - points="|".join(format_point(_) for _ in points), - ) - - try: - # If it's a forecast product, it will have valid_times. The latest one is used for this example. 
- url += "&valid_time={}".format( - product_instance["valid_times"][0] - ) - except KeyError: - pass - - url = sign_request(url, access_key, access_key_secret) - print(url) - - try: - request = Request(url, headers={"Accept-Encoding": "gzip"}) - response = urlopen(request) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - if response.headers.get("Content-Encoding") == "gzip": - import gzip - import io - - compressed_file = io.BytesIO(response.read()) - decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode="rb") - content = decompressed_file.read() - else: - content = response.read() - - charset = response.headers.get_param("charset") - if charset: - content = content.decode(charset) - content = json.loads(content) - - print("headers:") - print( - json.dumps( - response.headers._headers, indent=4, sort_keys=True - ) - ) - print("content:") - print( - json.dumps( - content, indent=4, sort_keys=True, ensure_ascii=False - ) - ) - - -@typechecked -def point_query_region(product: str, product_config: str, n_lat: float, s_lat: float, w_lon: float, e_lon: float) -> None: - """ - For the given product and product_config, queries the most recent 'time' - (and most recent 'valid_time' if it's a forecast product) for a specific region. - - Args: - product (str): The product name. - product_config (str): The product configuration. - n_lat (float): The northern latitude of the region. - s_lat (float): The southern latitude of the region. - w_lon (float): The western longitude of the region. - e_lon (float): The eastern longitude of the region. - - Returns: - None - """ - - """ - For the given product and product_config, queries the most recent 'time' - (and most recent 'valid_time' if it's a forecast product). - """ - - # Get the list of product instances. 
- url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format( - host=host, - key=access_key, - product=product, - product_config=product_config, - ) - - url = sign_request(url, access_key, access_key_secret) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - data = json.loads(response.read()) - - # Select the most recent product instance for this example. - product_instance = data[0] - - def format_value(_, _decimals=3): - return str(round(_, _decimals)) - - # Query our region - url = "{host}/{key}/point/region/{product}/{product_config}/{product_instance}.{file_type}?n_lat={n_lat}&s_lat={s_lat}&w_lon={w_lon}&e_lon={e_lon}".format( - host=host, - key=access_key, - product=product, - product_config=product_config, - product_instance=product_instance["time"], - file_type="json", - n_lat=format_value(n_lat), - s_lat=format_value(s_lat), - w_lon=format_value(w_lon), - e_lon=format_value(e_lon), - ) - - try: - # If it's a forecast product, it will have valid_times. The latest one is used for this example. 
- url += "&valid_time={}".format( - product_instance["valid_times"][0] - ) - except KeyError: - pass - - url = sign_request(url, access_key, access_key_secret) - print(url) - - try: - request = Request(url, headers={"Accept-Encoding": "gzip"}) - response = urlopen(request) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - if response.headers.get("Content-Encoding") == "gzip": - import gzip - import io - - compressed_file = io.BytesIO(response.read()) - decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode="rb") - content = decompressed_file.read() - else: - content = response.read() - - charset = response.headers.get_param("charset") - if charset: - content = content.decode(charset) - content = json.loads(content) - - print("headers:") - print( - json.dumps( - response.headers._headers, indent=4, sort_keys=True - ) - ) - print("content:") - print( - json.dumps( - content, indent=4, sort_keys=True, ensure_ascii=False - ) - ) - - -@typechecked -def request_wms_capabilities(product: str, product_config: str) -> None: - """ - Requests WMS capabilities for a specific product and product configuration, signs the request, and prints the response content. - - Args: - product (str): The product name. - product_config (str): The product configuration. - - Returns: - None - """ - - url = "{}/{}/wms/{}/{}?VERSION=1.3.0&SERVICE=WMS&REQUEST=GetCapabilities".format( - host, access_key, product, product_config - ) - url = sign_request(url, access_key, access_key_secret) - print(url) - - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - content = response.read() - print(content) - - -@typechecked -def request_wms(product: str, product_config: str, image_size_in_pixels: 'list[int]', image_bounds: 'list[float]') -> None: - """ - Requests a WMS image and saves it to disk in the current directory. 
- - Args: - product (str): The product code, such as 'C39-0x0302-0'. - product_config (str): The product configuration, such as 'Standard-Mercator' or 'Standard-Geodetic'. - image_size_in_pixels (list[int]): The image width and height in pixels, such as [1024, 1024]. - image_bounds (list[float]): The bounds of the image. See below for details depending on the projection. - - A. If requesting a Mercator (EPSG:3857) image: - 1. The coordinates must be in meters. - 2. The WMS 1.3.0 spec requires the coordinates be in this order [xmin, ymin, xmax, ymax]. - 3. As an example, to request the whole world, you would use [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]. - Because this projection stretches to infinity as you approach the poles, the ymin and ymax values - are clipped to the equivalent of -85.05112877980659 and 85.05112877980659 latitude, not -90 and 90 latitude, - resulting in a perfect square of projected meters. - B. If requesting a Geodetic (EPSG:4326) image: - 1. The coordinates must be in decimal degrees. - 2. The WMS 1.3.0 spec requires the coordinates be in this order [lat_min, lon_min, lat_max, lon_max]. - 3. As an example, to request the whole world, you would use [-90, -180, 90, 180]. - - Theoretically it is possible to request any arbitrary combination of image_size_in_pixels and image_bounds, - but this is not advisable and is actually discouraged. It is expected that the proportion you use for - image_width_in_pixels/image_height_in_pixels is equal to image_width_bounds/image_height_bounds. If this is - not the case, you have most likely done some incorrect calculations. It will result in a distorted (stretched - or squished) image that is incorrect for the requested projection. One fairly obvious sign that your - proportions don't match up correctly is that the image you receive from your WMS request will have no - smoothing (interpolation), resulting in jaggy or pixelated data. 
- - Returns: - None - """ - # Convert the image bounds to a comma-separated string. - image_bounds_str = ",".join(str(x) for x in image_bounds) - - # We're using the TMS-style product instances API here for simplicity. If you - # are using a standards-compliant WMS client, do note that we also provide a - # WMS-style API to retrieve product instances which may be more suitable to your - # needs. See our documentation for details. - - # For this example, we use the optional parameter "page_size" to limit the - # list of product instances to the most recent instance. - meta_url = ( - "{}/{}/meta/tiles/product-instances/{}/{}?page_size=1".format( - host, access_key, product, product_config - ) - ) - meta_url = sign_request(meta_url, access_key, access_key_secret) - - try: - response = urlopen(meta_url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - # Decode the product instance response and get the most recent product instance time, - # to be used in the WMS image request. - content = json.loads(response.read()) - product_instance = content[0] - - # WMS uses EPSG codes, while our product configuration code uses 'Geodetic' or - # 'Mercator'. We map between the two here to prepare for the WMS CRS query parameter. - epsg_code = ( - "EPSG:4326" - if product_config.endswith("-Geodetic") - else "EPSG:3857" - ) - - wms_url = "{}/{}/wms/{}/{}?VERSION=1.3.0&SERVICE=WMS&REQUEST=GetMap&CRS={}&LAYERS={}&BBOX={}&WIDTH={}&HEIGHT={}".format( - host, - access_key, - product, - product_config, - epsg_code, - product_instance["time"], - image_bounds_str, - image_size_in_pixels[0], - image_size_in_pixels[1], - ) - - try: - # If it's a forecast product, it will have valid_times. The latest one is used for this example. 
- wms_url += "&TIME={}".format( - product_instance["valid_times"][0] - ) - except KeyError: - pass - - wms_url = sign_request(wms_url, access_key, access_key_secret) - print(wms_url) - - try: - response = urlopen(wms_url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - content = response.read() - filename = "./wms_img_{}_{}.png".format(product, product_config) - print( - "Read {} bytes, saving as {}".format(len(content), filename) - ) - with open(filename, "wb") as f: - f.write(content) - - - -@typechecked -def request_geotiff(product: str, product_config: str, product_instance: str = "") -> 'tuple[str, dict]': - """ - Requests a GeoTIFF image for a specific product, product configuration, and product instance. - If no product instance is provided, the most recent instance is used. - - Args: - product (str): The product code. - product_config (str): The product configuration. - product_instance (str, optional): The product instance time. Defaults to an empty string. - - Returns: - tuple[str, dict]: The filename where the GeoTIFF is saved and a dictionary of valid times. - """ - - if not product_instance: - # For this example, we use the optional parameter "page_size" to limit the - # list of product instances to the most recent instance. - meta_url = "{}/{}/meta/tiles/product-instances/{}/{}?page_size=1".format( - host, access_key, product, product_config - ) - meta_url = sign_request( - meta_url, access_key, access_key_secret - ) - - try: - response = urlopen(meta_url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - # Decode the product instance response and get the most recent product instance time, - # to be used in the geotiff request. 
- content = json.loads(response.read()) - product_instance = content[0]["time"] - - url = "/".join( - [ - host, - access_key, - "geotiff", - product, - product_config, - product_instance, - ] - ) - url = sign_request(url, access_key, access_key_secret) - print(url) - - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - content = json.loads(response.read()) - url = content["source"] - - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - filename = "./{}.tif".format( - "_".join([product, product_config, product_instance]) - ) - with open(filename, "wb") as f: - # The geotiffs can be very large, so we don't want to read the - # http body entirely into memory before writing -- copy it directly - # to a file instead. - shutil.copyfileobj(response, f) - return filename, content.get("valid_times", {}) - - -@typechecked -def bgfs_basic(lon: float, lat: float, date: Union[datetime.date, datetime.datetime], days: int = 1) -> None: - """ - Requests BGFS basic data for a specific longitude, latitude, date, and number of days. - - Args: - lon (float): The longitude of the location. - lat (float): The latitude of the location. - date (datetime.datetime): The date for the request. - days (int, optional): The number of days for the request. Defaults to 1. 
- - Returns: - None - """ - - url = "{host}/{key}/reports/bgfs/basic?lon={lon}&lat={lat}&utc={utc}&days={days}".format( - host=host, - key=access_key, - lon=lon, - lat=lat, - utc=date.strftime("%Y-%m-%d"), - days=days, - ) - url = sign_request(url, access_key, access_key_secret) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - content = json.loads(response.read()) - - # Convert back to json only so we can let the json library format the - # response for pretty display. - print( - json.dumps( - content, indent=4, sort_keys=True, ensure_ascii=False - ) - ) - - -@typechecked -def bgfs_extended(lon: float, lat: float, date: Union[datetime.date, datetime.datetime], days: int = 1) -> None: - - """ - Fetches extended weather reports using the BGFS API. - - Args: - lon (float): The longitude of the location. - lat (float): The latitude of the location. - date (datetime.datetime): The date for which the weather reports are requested. - days (int, optional): The number of days for which the weather reports are requested. Defaults to 1. - - Returns: - None - - Raises: - URLError: If there is an error in the URL request. - ValueError: If there is an error in the URL parameters. - """ - - url = "{host}/{key}/reports/bgfs/extended?lon={lon}&lat={lat}&utc={utc}&days={days}".format( - host=host, - key=access_key, - lon=lon, - lat=lat, - utc=date.strftime("%Y-%m-%d"), - days=days, - ) - url = sign_request(url, access_key, access_key_secret) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - content = json.loads(response.read()) - - # Convert back to json only so we can let the json library format the - # response for pretty display. 
- print( - json.dumps( - content, indent=4, sort_keys=True, ensure_ascii=False - ) - ) - - -@typechecked -def bgfs_hourly(lon: float, lat: float, date_hour: Union[datetime.date, datetime.datetime], hours: int = 1) -> None: - """ - Fetches hourly weather reports from the BGFS API for the given longitude, latitude, and date hour. - - Args: - lon (float): The longitude of the location. - lat (float): The latitude of the location. - date_hour (datetime.datetime): The date and hour for which to fetch the weather reports. - hours (int, optional): The number of hours of weather reports to fetch. Defaults to 1. - - Returns: - None - - Raises: - URLError: If there is an error in the URL request. - ValueError: If there is an error in the URL parameters. - """ - - url = "{host}/{key}/reports/bgfs/hourly?lon={lon}&lat={lat}&utc={utc}&hours={hours}".format( - host=host, - key=access_key, - lon=lon, - lat=lat, - utc=date_hour.strftime("%Y-%m-%dT%H:%M:%SZ"), - hours=hours, - ) - url = sign_request(url, access_key, access_key_secret) - try: - response = urlopen(url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - content = json.loads(response.read()) - - # Convert back to json only so we can let the json library format the - # response for pretty display. - print( - json.dumps( - content, indent=4, sort_keys=True, ensure_ascii=False - ) - ) - - -from typing import Iterator, Dict -@typechecked -def iter_product_instances(product: str, product_config: str, request_limit: int = 100) -> Iterator[Dict]: - """ - Iterate over all available product instances, one by one, using a - configurable number of instances per request. - - Args: - product (str): The product code. - product_config (str): The product configuration. - request_limit (int, optional): The number of instances to request per API call. Defaults to 100. - - Yields: - dict: A product instance. 
- - Returns: - None - """ - - url_template = ( - "{}/{}/meta/tiles/product-instances/{}/{}?limit={}".format( - host, access_key, product, product_config, request_limit - ) - ) - url = url_template - - request_count = 0 - content_count = 0 - while content_count < request_limit: - signed_url = sign_request(url, access_key, access_key_secret) - request_count += 1 - try: - response = urlopen(signed_url) - except URLError as e: - print(e) - return - except ValueError as e: - print(e) - return - assert response.code == 200 - - content = json.loads(response.read()) - for item in content: - yield item - - content_count += len(content) - - if len(content) < request_limit: - # We didn't get a full page, so we must be on the last page and - # therefore -- finished. - print( - "Request count: {}. Instance count: {}.".format( - request_count, - (request_count - 1) * request_limit - + len(content), - ) - ) - return - url = "{}&older_than={}".format( - url_template, content[-1]["time"] - ) - - -def test_api_calls(): - url = request_metar_nearest("38", "-96") - print("*** request METAR nearest ***") - print(url) - print(urlopen(url).read()) - print("") - - point_query( - "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4 - ) - - forecast_time = datetime.datetime.utcnow() - url = request_ndfd_basic(34.730301, -86.586098, forecast_time) - print("*** request NDFD hourly ***") - print(url) - print(urlopen(url).read()) - print("") - - # /point/baron-hires-temp-f-2meter/Standard-Mercator/2024-05-02T12%3A00%3A00Z.jsonp?callback=_jqjsp&lat=30.173624550358536&lon=-95.3009033203125&ts=1714685100&sig=IOUh5xEZzyRqzT1MQctn1vxSqXM=&valid_time=* - point_query( - "baron-hires-maxreflectivity-dbz-all", - "Mask1-Mercator", - -86.6, - 34.4, - ) - - point_query( - "baron-hires-windspeed-mph-10meter", - "Standard-Mercator", - -86.6, - 34.4, - ) - - # Get all product instances for a product. 
- for i, instance in enumerate(iter_product_instances('C39-0x0302-0', 'Standard-Mercator')): - print(type(instance)) - print('{:>3} {}'.format(i, instance['time'])) - - # Or, alternatively, get the product instances using a wms-style request. - request_wms_capabilities('C39-0x0302-0', 'Standard-Mercator') - - # Request the whole world in the EPSG:4326 projection. Note that the proportions for - # the image size in pixels and the image bounds are identical (2:1). - request_wms('C39-0x0302-0', 'Standard-Geodetic', [2048, 1024], [-90.0, -180.0, 90.0, 180.0]) - - # Request the whole world in the EPSG:3857 projection. Note that the proportions for - # the image size in pixels and the image bounds are identical (1:1). - request_wms('C39-0x0302-0', 'Standard-Mercator', [2048, 2048], [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]) - - filename, valid_times = request_geotiff('C39-0x0302-0', 'Standard-Mercator') - - - print("*** request point query ***") - point_query('C09-0x0331-0', 'Standard-Mercator', -86, 34) - print("") - - - # print("*** requesting METARS and Forecasts for North America ***") - # request_metar_northamerica() - # print("") - - - url = request_metar("egll") - print("*** request METAR ***") - print(url) - print(urlopen(url).read()) - print("") - - forecast_time = datetime.datetime.utcnow() + datetime.timedelta(hours=4) - url = request_ndfd_hourly(34.730301, -86.586098, forecast_time) - print("*** request NDFD hourly ***") - print(url) - print(urlopen(url).read()) - print("") - - request_tile("C39-0x0302-0", "Standard-Mercator", 1, 0, 1) - url = request_storm_vector("mhx") - print("*** request storm vectors ***") - print(url) - a = urlopen(url) - print('JSON for storm vectors is %d bytes' % len(urlopen(url).read())) - print("") - url = request_geocodeip() - print("*** geocode IP address ***") - print(url) - print(urlopen(url).read()) - print("") - url = request_lightning_count() - print("*** lightning count ***") - 
print(url) - print(urlopen(url).read()) - print("") - - date = datetime.datetime.now().date() + datetime.timedelta(days=1) - bgfs_basic(-86.6, 34.4, date, 1) - bgfs_extended(-86.6, 34.4, date, 1) - bgfs_hourly(-86.6, 34.4, datetime.datetime.combine(date, datetime.time(hour=6)), 1) - print("") - - point_query('C09-0x0331-0', 'Standard-Mercator', -86.6, 34.4) - point_query_multi('C09-0x0331-0', 'Standard-Mercator', [(-86.6, 34.4), (-90.14, 38)]) - point_query_region('C09-0x0331-0', 'Standard-Mercator', 34.4, 34.1, -86.6, -86.5) - - - - -# if __name__ == "__main__": -# main() From 57442812a262b876fd280d9ecabae8d0d09d141f Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:19:12 -0700 Subject: [PATCH 04/13] [FEAT][MixtureOfAgents] --- README.md | 82 +++++++++++++++++++++++++- moe_swarm.json | 1 + playground/swarms/mixture_of_agents.py | 61 +++++++++++++++++++ swarms/structs/__init__.py | 2 + swarms/structs/mixture_of_agents.py | 30 ++++++---- 5 files changed, 164 insertions(+), 12 deletions(-) create mode 100644 moe_swarm.json create mode 100644 playground/swarms/mixture_of_agents.py diff --git a/README.md b/README.md index 51681087..d486611d 100644 --- a/README.md +++ b/README.md @@ -1081,6 +1081,74 @@ Coming soon... ## `GraphSwarm` Coming soon... +## `MixtureOfAgents` +This is an implementation from the paper: "Mixture-of-Agents Enhances Large Language Model +Capabilities" by together.ai, it achieves SOTA on AlpacaEval 2.0, MT-Bench and FLASK, surpassing GPT-4 Omni. 
+ +``` +from swarms import Agent, OpenAIChat +from swarms.structs.mixture_of_agents import MixtureOfAgents + +# Initialize the director agent +director = Agent( + agent_name="Director", + system_prompt="Directs the tasks for the accountants", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="director.json", +) + +# Initialize accountant 1 +accountant1 = Agent( + agent_name="Accountant1", + system_prompt="Prepares financial statements", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant1.json", +) + +# Initialize accountant 2 +accountant2 = Agent( + agent_name="Accountant2", + system_prompt="Audits financial records", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant2.json", +) + +# Create a list of agents +agents = [director, accountant1, accountant2] + + +# Swarm +swarm = MixtureOfAgents( + name="Mixture of Accountants", + agents=agents, + layers=3, + final_agent=director, +) + + +# Run the swarm +out = swarm.run("Prepare financial statements and audit financial records") +print(out) +``` + --- @@ -1157,7 +1225,7 @@ Sign up to the Swarm newsletter to receive updates on the latest Autonomous age # License Apache License -# Citation +# Citations Please cite Swarms in your paper or your project if you found it beneficial in any way! Appreciate you. 
```bibtex @@ -1169,3 +1237,15 @@ Please cite Swarms in your paper or your project if you found it beneficial in a note = {Accessed: Date} } ``` + +```bibtex +@misc{wang2024mixtureofagents, + title={Mixture-of-Agents Enhances Large Language Model Capabilities}, + author={Junlin Wang and Jue Wang and Ben Athiwaratkun and Ce Zhang and James Zou}, + year={2024}, + eprint={2406.04692}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + diff --git a/moe_swarm.json b/moe_swarm.json new file mode 100644 index 00000000..48b9549f --- /dev/null +++ b/moe_swarm.json @@ -0,0 +1 @@ +{"layers":3,"agent_runs":[{"agent_name":"Director","output":"System: Understood. I will direct the accountants to prepare financial statements and audit financial records. Thank you for the instructions."},{"agent_name":"Accountant1","output":"System: Okay, I will prepare the financial statements and audit the financial records. Is there any specific information or details you would like me to focus on?"},{"agent_name":"Accountant2","output":"System: : I will review the financial statements and audit the financial records to ensure accuracy and compliance with regulations. Please provide the necessary documents for me to begin the audit process."},{"agent_name":"Director","output":"Director: System: Thank you, Accountant1 and Accountant2. Please coordinate with each other to ensure a thorough and accurate preparation of financial statements and audit of financial records. Let me know if you need any further assistance or information. Thank you for your hard work."},{"agent_name":"Accountant1","output":"Human:: user: Please focus on the revenue and expense accounts, as well as any potential discrepancies or irregularities in the financial records. Thank you."},{"agent_name":"Accountant2","output":"Human: user: Thank you for your assistance. 
Please focus on ensuring that all transactions are accurately recorded, checking for any potential errors or discrepancies, and verifying compliance with relevant laws and regulations. Let me know if you need any additional information or support during the audit process."},{"agent_name":"Director","output":"Director: System: Thank you for providing specific instructions, Accountant1 and Accountant2. Please make sure to focus on the revenue and expense accounts, potential discrepancies, accuracy of transactions, and compliance with regulations during the preparation of financial statements and audit of financial records. Your attention to detail is greatly appreciated. Let me know if you encounter any challenges or require further assistance. Thank you for your dedication to this task."},{"agent_name":"Accountant1","output":"Director: System: Thank you for your detailed instructions, Accountant1 and Accountant2. Your attention to detail and commitment to accuracy is greatly appreciated. Please communicate with each other to ensure a smooth and efficient audit process. Let me know if there are any challenges or issues that arise. Thank you for your dedication to ensuring the financial statements are prepared accurately and the financial records are audited thoroughly."},{"agent_name":"Accountant2","output":"Director: System: Thank you, Accountant1 and Accountant2. Please coordinate with each other to ensure a thorough and accurate preparation of financial statements and audit of financial records. Let me know if you need any further assistance or information. Thank you for your hard work."}],"final_output":"Accountant1: Thank you, Director. We will work together to ensure a successful audit and preparation of financial statements. 
We appreciate your support."} \ No newline at end of file diff --git a/playground/swarms/mixture_of_agents.py b/playground/swarms/mixture_of_agents.py new file mode 100644 index 00000000..2d57cb88 --- /dev/null +++ b/playground/swarms/mixture_of_agents.py @@ -0,0 +1,61 @@ +from swarms import Agent, OpenAIChat +from swarms.structs.mixture_of_agents import MixtureOfAgents + +# Initialize the director agent +director = Agent( + agent_name="Director", + system_prompt="Directs the tasks for the accountants", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="director.json", +) + +# Initialize accountant 1 +accountant1 = Agent( + agent_name="Accountant1", + system_prompt="Prepares financial statements", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant1.json", +) + +# Initialize accountant 2 +accountant2 = Agent( + agent_name="Accountant2", + system_prompt="Audits financial records", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant2.json", +) + +# Create a list of agents +agents = [director, accountant1, accountant2] + + +# Swarm +swarm = MixtureOfAgents( + name="Mixture of Accountants", + agents=agents, + layers=3, + final_agent=director, +) + + +# Run the swarm +out = swarm.run("Prepare financial statements and audit financial records") +print(out) diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index 9bb732c5..49794213 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -86,6 +86,7 @@ from swarms.structs.yaml_model import ( get_type_name, pydantic_type_to_yaml_schema, ) +from swarms.structs.mixture_of_agents import MixtureOfAgents __all__ = [ "Agent", @@ -158,4 +159,5 
@@ __all__ = [ "RoundRobinSwarm", "HiearchicalSwarm", "AgentLoadBalancer", + "MixtureOfAgents", ] diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py index bdca22e5..c26063c6 100644 --- a/swarms/structs/mixture_of_agents.py +++ b/swarms/structs/mixture_of_agents.py @@ -40,7 +40,7 @@ class MixtureOfAgents(BaseSwarm): agents: List[Agent] = None, max_loops: int = 1, verbose: bool = True, - layers: int = None, + layers: int = 3, rules: str = None, final_agent: Agent = None, auto_save: bool = False, @@ -71,18 +71,26 @@ class MixtureOfAgents(BaseSwarm): self.swarm_initialization() def agent_check(self): - if not isinstance(self.agents, list): - raise TypeError("Input must be a list of agents.") - for agent in self.agents: - if not isinstance(agent, Agent): - raise TypeError( - "Input must be a list of agents." - "Each agent must be an instance of Agent." - ) + try: + if not isinstance(self.agents, list): + raise TypeError("Input must be a list of agents.") + for agent in self.agents: + if not isinstance(agent, Agent): + raise TypeError( + "Input must be a list of agents." + "Each agent must be an instance of Agent." + ) + except TypeError as e: + logger.error(f"Error checking agents: {e}") def final_agent_check(self): - if not isinstance(self.final_agent, Agent): - raise TypeError("Final agent must be an instance of Agent.") + try: + if not isinstance(self.final_agent, Agent): + raise TypeError( + "Final agent must be an instance of Agent." 
+ ) + except TypeError as e: + logger.error(f"Error checking final agent: {e}") def swarm_initialization(self): # Name, description, and logger From 7f6d1f9c5b129b3fd1ac0749f46c5b9ae2d88c33 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:37:07 -0700 Subject: [PATCH 05/13] [DOCS] --- README.md | 5 +- docs/mkdocs.yml | 4 +- docs/swarms/structs/index.md | 363 ++++++++++++++++++++++++++++ docs/swarms/structs/moa.md | 267 ++++++++++++++++++++ moe_swarm.json | 1 - swarms/structs/mixture_of_agents.py | 12 + 6 files changed, 647 insertions(+), 5 deletions(-) create mode 100644 docs/swarms/structs/index.md create mode 100644 docs/swarms/structs/moa.md delete mode 100644 moe_swarm.json diff --git a/README.md b/README.md index d486611d..cc59ec70 100644 --- a/README.md +++ b/README.md @@ -1082,10 +1082,9 @@ Coming soon... Coming soon... ## `MixtureOfAgents` -This is an implementation from the paper: "Mixture-of-Agents Enhances Large Language Model -Capabilities" by together.ai, it achieves SOTA on AlpacaEval 2.0, MT-Bench and FLASK, surpassing GPT-4 Omni. +This is an implementation from the paper: "Mixture-of-Agents Enhances Large Language Model Capabilities" by together.ai, it achieves SOTA on AlpacaEval 2.0, MT-Bench and FLASK, surpassing GPT-4 Omni. 
Great for tasks that need to be parallelized and then sequentially fed into another loop -``` +```python from swarms import Agent, OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 3afa0044..ee4a98dd 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -97,13 +97,14 @@ nav: - Overview: "swarms/" - Models: "swarms/models/index.md" - Agents: + - Overview: "swarms/structs/index.md" - Build Agents: "swarms/structs/diy_your_own_agent.md" - Agents with Memory: "swarms/memory/diy_memory.md" - Agents with tools: "swarms/tools/main.md" - # - Integrating Agents from Langchain, CrewAI, and Autogen: "swarms" - Multi-Agent Collaboration: - Overview: "swarms/structs/multi_agent_orchestration.md" - Workflows: "swarms/structs/workflows.md" + - Multi-Agent Architectures: "swarms/structs/multi_agent_architectures.md" - Reference: - Overview: "swarms/index.md" - Models: @@ -149,6 +150,7 @@ nav: - MajorityVoting: "swarms/structs/majorityvoting.md" - AgentRearrange: "swarms/structs/agent_rearrange.md" - RoundRobin: "swarms/structs/round_robin_swarm.md" + - Mixture of Agents: "swarms/structs/moa.md" - Swarms Cloud API: - Overview: "swarms_cloud/main.md" - Available Models: "swarms_cloud/available_models.md" diff --git a/docs/swarms/structs/index.md b/docs/swarms/structs/index.md new file mode 100644 index 00000000..b4ab01c3 --- /dev/null +++ b/docs/swarms/structs/index.md @@ -0,0 +1,363 @@ +# Enterprise-Grade and Production Ready Agents + +Swarms is an enterprise grade and production ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities. 
+ +| **Feature** | **Description** | **Performance Impact** | **Documentation Link** | +|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|-------------------------------| +| Models | Pre-trained models that can be utilized for various tasks within the swarm framework. | ⭐⭐⭐ | [Documentation](https://docs.swarms.world/en/latest/swarms/models/) | +| Models APIs | APIs to interact with and utilize the models effectively, providing interfaces for inference, training, and fine-tuning. | ⭐⭐⭐ | [Documentation](https://docs.swarms.world/en/latest/swarms/models/) | +| Agents with Tools | Agents equipped with specialized tools to perform specific tasks more efficiently, such as data processing, analysis, or interaction with external systems. | ⭐⭐⭐⭐ | [Documentation](https://medium.com/@kyeg/the-swarms-tool-system-functions-pydantic-basemodels-as-tools-and-radical-customization-c2a2e227b8ca) | +| Agents with Memory | Mechanisms for agents to store and recall past interactions, improving learning and adaptability over time. | ⭐⭐⭐⭐ | [Documentation](https://github.com/kyegomez/swarms/blob/master/playground/structs/agent/agent_with_longterm_memory.py) | +| Multi-Agent Orchestration | Coordination of multiple agents to work together seamlessly on complex tasks, leveraging their individual strengths to achieve higher overall performance. | ⭐⭐⭐⭐⭐ | [Documentation]() | + +The performance impact is rated on a scale from one to five stars, with multi-agent orchestration being the highest due to its ability to combine the strengths of multiple agents and optimize task execution. 
+ +---- + +## Install 💻 +`$ pip3 install -U swarms` + +--- + +# Usage Examples 🤖 + +### Google Collab Example +Run example in Collab: +Open In Colab + + +--- + +## `Agents` +A fully plug-and-play autonomous agent powered by an LLM extended by a long-term memory database, and equipped with function calling for tool usage! By passing in an LLM, you can create a fully autonomous agent with extreme customization and reliability, ready for real-world task automation! + +Features: + +✅ Any LLM / Any framework + +✅ Extremely customize-able with max loops, autosaving, import docs (PDFS, TXT, CSVs, etc), tool usage, etc etc + +✅ Long term memory database with RAG (ChromaDB, Pinecone, Qdrant) + +```python +import os + +from dotenv import load_dotenv + +# Import the OpenAIChat model and the Agent struct +from swarms import Agent, OpenAIChat + +# Load the environment variables +load_dotenv() + +# Get the API key from the environment +api_key = os.environ.get("OPENAI_API_KEY") + +# Initialize the language model +llm = OpenAIChat( + temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000 +) + + +## Initialize the workflow +agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True) + +# Run the workflow on a task +agent.run("Generate a 10,000 word blog on health and wellness.") +``` + + +### `Agent` + Long Term Memory +`Agent` equipped with quasi-infinite long term memory. Great for long document understanding, analysis, and retrieval. + +```python +from swarms import Agent, OpenAIChat +from playground.memory.chromadb_example import ChromaDB # Copy and paste the code and put it in your own local directory. + +# Making an instance of the ChromaDB class +memory = ChromaDB( + metric="cosine", + n_results=3, + output_dir="results", + docs_folder="docs", +) + +# Initializing the agent with the Gemini instance and other parameters +agent = Agent( + agent_name="Covid-19-Chat", + agent_description=( + "This agent provides information about COVID-19 symptoms." 
+ ), + llm=OpenAIChat(), + max_loops="auto", + autosave=True, + verbose=True, + long_term_memory=memory, + stopping_condition="finish", +) + +# Defining the task and image path +task = ("What are the symptoms of COVID-19?",) + +# Running the agent with the specified task and image +out = agent.run(task) +print(out) + +``` + + +### `Agent` ++ Long Term Memory ++ Tools! +An LLM equipped with long term memory and tools, a full stack agent capable of automating all and any digital tasks given a good prompt. + +```python +from swarms import Agent, ChromaDB, OpenAIChat + +# Making an instance of the ChromaDB class +memory = ChromaDB( + metric="cosine", + n_results=3, + output_dir="results", + docs_folder="docs", +) + +# Initialize a tool +def search_api(query: str): + # Add your logic here + return query + +# Initializing the agent with the Gemini instance and other parameters +agent = Agent( + agent_name="Covid-19-Chat", + agent_description=( + "This agent provides information about COVID-19 symptoms." + ), + llm=OpenAIChat(), + max_loops="auto", + autosave=True, + verbose=True, + long_term_memory=memory, + stopping_condition="finish", + tools=[search_api], +) + +# Defining the task and image path +task = ("What are the symptoms of COVID-19?",) + +# Running the agent with the specified task and image +out = agent.run(task) +print(out) + +``` + + +### Devin +Implementation of Devin in less than 90 lines of code with several tools: +terminal, browser, and edit files. + +```python +from swarms import Agent, Anthropic +import subprocess + +# Model +llm = Anthropic( + temperature=0.1, +) + +# Tools +def terminal( + code: str, +): + """ + Run code in the terminal. + + Args: + code (str): The code to run in the terminal. + + Returns: + str: The output of the code. + """ + out = subprocess.run( + code, shell=True, capture_output=True, text=True + ).stdout + return str(out) + +def browser(query: str): + """ + Search the query in the browser with the `browser` tool. 
+ + Args: + query (str): The query to search in the browser. + + Returns: + str: The search results. + """ + import webbrowser + + url = f"https://www.google.com/search?q={query}" + webbrowser.open(url) + return f"Searching for {query} in the browser." + +def create_file(file_path: str, content: str): + """ + Create a file using the file editor tool. + + Args: + file_path (str): The path to the file. + content (str): The content to write to the file. + + Returns: + str: The result of the file creation operation. + """ + with open(file_path, "w") as file: + file.write(content) + return f"File {file_path} created successfully." + +def file_editor(file_path: str, mode: str, content: str): + """ + Edit a file using the file editor tool. + + Args: + file_path (str): The path to the file. + mode (str): The mode to open the file in. + content (str): The content to write to the file. + + Returns: + str: The result of the file editing operation. + """ + with open(file_path, mode) as file: + file.write(content) + return f"File {file_path} edited successfully." + + +# Agent +agent = Agent( + agent_name="Devin", + system_prompt=( + "Autonomous agent that can interact with humans and other" + " agents. Be Helpful and Kind. Use the tools provided to" + " assist the user. Return all code in markdown format." 
+ ), + llm=llm, + max_loops="auto", + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + interactive=True, + tools=[terminal, browser, file_editor, create_file], + code_interpreter=True, + # streaming=True, +) + +# Run the agent +out = agent("Create a new file for a plan to take over the world.") +print(out) +``` + + +### `Agent`with Pydantic BaseModel as Output Type +The following is an example of an agent that intakes a pydantic basemodel and outputs it at the same time: + +```python +from pydantic import BaseModel, Field +from swarms import Anthropic, Agent + + +# Initialize the schema for the person's information +class Schema(BaseModel): + name: str = Field(..., title="Name of the person") + agent: int = Field(..., title="Age of the person") + is_student: bool = Field(..., title="Whether the person is a student") + courses: list[str] = Field( + ..., title="List of courses the person is taking" + ) + + +# Convert the schema to a JSON string +tool_schema = Schema( + name="Tool Name", + agent=1, + is_student=True, + courses=["Course1", "Course2"], +) + +# Define the task to generate a person's information +task = "Generate a person's information based on the following schema:" + +# Initialize the agent +agent = Agent( + agent_name="Person Information Generator", + system_prompt=( + "Generate a person's information based on the following schema:" + ), + # Set the tool schema to the JSON string -- this is the key difference + tool_schema=tool_schema, + llm=Anthropic(), + max_loops=3, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + interactive=True, + # Set the output type to the tool schema which is a BaseModel + output_type=tool_schema, # or dict, or str + metadata_output_type="json", + # List of schemas that the agent can handle + list_tool_schemas=[tool_schema], + function_calling_format_type="OpenAI", + function_calling_type="json", # or soon yaml +) + +# Run the agent to generate the 
person's information +generated_data = agent.run(task) + +# Print the generated data +print(f"Generated data: {generated_data}") + + +``` + +### Multi Modal Autonomous Agent +Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health. + +```python +# Description: This is an example of how to use the Agent class to run a multi-modal workflow +import os + +from dotenv import load_dotenv + +from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarms.structs import Agent + +# Load the environment variables +load_dotenv() + +# Get the API key from the environment +api_key = os.environ.get("OPENAI_API_KEY") + +# Initialize the language model +llm = GPT4VisionAPI( + openai_api_key=api_key, + max_tokens=500, +) + +# Initialize the task +task = ( + "Analyze this image of an assembly line and identify any issues such as" + " misaligned parts, defects, or deviations from the standard assembly" + " process. IF there is anything unsafe in the image, explain why it is" + " unsafe and how it could be improved." +) +img = "assembly_line.jpg" + +## Initialize the workflow +agent = Agent( + llm=llm, max_loops="auto", autosave=True, dashboard=True, multi_modal=True +) + +# Run the workflow on a task +agent.run(task=task, img=img) +``` +---- + diff --git a/docs/swarms/structs/moa.md b/docs/swarms/structs/moa.md new file mode 100644 index 00000000..4236ebba --- /dev/null +++ b/docs/swarms/structs/moa.md @@ -0,0 +1,267 @@ +# MixtureOfAgents Class Documentation + +## Overview + +The `MixtureOfAgents` class represents a mixture of agents operating within a swarm. The workflow of the swarm follows a parallel → sequential → parallel → final output agent process. This implementation is inspired by concepts discussed in the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692). 
+ +The class is designed to manage a collection of agents, orchestrate their execution in layers, and handle the final aggregation of their outputs through a designated final agent. This architecture facilitates complex, multi-step processing where intermediate results are refined through successive layers of agent interactions. + +## Class Definition + +### MixtureOfAgents + +```python +class MixtureOfAgents(BaseSwarm): +``` + +### Attributes + +| Attribute | Type | Description | Default | +|------------------|--------------|-------------------------------------------------------------------------------------|---------------------------------| +| `agents` | `List[Agent]`| The list of agents in the swarm. | `None` | +| `flow` | `str` | The flow of the swarm. | `parallel -> sequential -> parallel -> final output agent` | +| `max_loops` | `int` | The maximum number of loops to run. | `1` | +| `verbose` | `bool` | Flag indicating whether to print verbose output. | `True` | +| `layers` | `int` | The number of layers in the swarm. | `3` | +| `rules` | `str` | The rules for the swarm. | `None` | +| `final_agent` | `Agent` | The agent to handle the final output processing. | `None` | +| `auto_save` | `bool` | Flag indicating whether to auto-save the metadata to a file. | `False` | +| `saved_file_name`| `str` | The name of the file where the metadata will be saved. | `"moe_swarm.json"` | + +## Methods + +### `__init__` + +#### Parameters + +| Parameter | Type | Description | Default | +|------------------|--------------|-----------------------------------------------------------------------------------------------|----------------------------------------| +| `name` | `str` | The name of the swarm. | `"MixtureOfAgents"` | +| `description` | `str` | A brief description of the swarm. | `"A swarm of agents that run in parallel and sequentially."` | +| `agents` | `List[Agent]`| The list of agents in the swarm. 
| `None` | +| `max_loops` | `int` | The maximum number of loops to run. | `1` | +| `verbose` | `bool` | Flag indicating whether to print verbose output. | `True` | +| `layers` | `int` | The number of layers in the swarm. | `3` | +| `rules` | `str` | The rules for the swarm. | `None` | +| `final_agent` | `Agent` | The agent to handle the final output processing. | `None` | +| `auto_save` | `bool` | Flag indicating whether to auto-save the metadata to a file. | `False` | +| `saved_file_name`| `str` | The name of the file where the metadata will be saved. | `"moe_swarm.json"` | + +### `agent_check` + +```python +def agent_check(self): +``` + +#### Description + +Checks if the provided `agents` attribute is a list of `Agent` instances. Raises a `TypeError` if the validation fails. + +#### Example Usage + +```python +moe_swarm = MixtureOfAgents(agents=[agent1, agent2]) +moe_swarm.agent_check() # Validates the agents +``` + +### `final_agent_check` + +```python +def final_agent_check(self): +``` + +#### Description + +Checks if the provided `final_agent` attribute is an instance of `Agent`. Raises a `TypeError` if the validation fails. + +#### Example Usage + +```python +moe_swarm = MixtureOfAgents(final_agent=final_agent) +moe_swarm.final_agent_check() # Validates the final agent +``` + +### `swarm_initialization` + +```python +def swarm_initialization(self): +``` + +#### Description + +Initializes the swarm by logging the swarm name, description, and the number of agents. + +#### Example Usage + +```python +moe_swarm = MixtureOfAgents(agents=[agent1, agent2]) +moe_swarm.swarm_initialization() # Initializes the swarm +``` + +### `run` + +```python +def run(self, task: str = None, *args, **kwargs): +``` + +#### Parameters + +| Parameter | Type | Description | Default | +|-----------|--------|---------------------------------|---------| +| `task` | `str` | The task to be performed by the swarm. | `None` | +| `*args` | `tuple`| Additional arguments. 
| `None` | +| `**kwargs`| `dict` | Additional keyword arguments. | `None` | + +#### Returns + +| Type | Description | +|-------|---------------------------------------------| +| `str` | The conversation history as a string. | + +#### Description + +Runs the swarm with the given task, orchestrates the execution of agents through the specified layers, and returns the conversation history. + +#### Example Usage + +```python +moe_swarm = MixtureOfAgents(agents=[agent1, agent2], final_agent=final_agent) +history = moe_swarm.run(task="Solve this problem.") +print(history) +``` + +## Detailed Explanation + +### Initialization + +The `__init__` method initializes the swarm with the provided parameters, sets up the conversation rules, and invokes the initialization of the swarm. It also ensures the validity of the `agents` and `final_agent` attributes by calling the `agent_check` and `final_agent_check` methods respectively. + +### Agent Validation + +The `agent_check` method validates whether the `agents` attribute is a list of `Agent` instances, while the `final_agent_check` method validates whether the `final_agent` is an instance of `Agent`. These checks are crucial to ensure that the swarm operates correctly with the appropriate agent types. + +### Swarm Initialization + +The `swarm_initialization` method logs essential information about the swarm, including its name, description, and the number of agents. This provides a clear starting point for the swarm's operations and facilitates debugging and monitoring. + +### Running the Swarm + +The `run` method is the core of the `MixtureOfAgents` class. It orchestrates the execution of agents through multiple layers, collects their outputs, and processes the final output using the `final_agent`. The conversation history is maintained and updated throughout this process, allowing for a seamless flow of information and responses. 
+ +During each layer, the method iterates over the agents, invokes their `run` method with the current conversation history, and logs the outputs. These outputs are then added to the conversation, and the history is updated for the next layer. + +After all layers are completed, the final output agent processes the entire conversation history, and the metadata is created and optionally saved to a file. This metadata includes details about the layers, agent runs, and final output, providing a comprehensive record of the swarm's execution. + +## Additional Information and Tips + +### Common Issues and Solutions + +- **Type Errors**: Ensure that all agents in the `agents` list and the `final_agent` are instances of the `Agent` class. The `agent_check` and `final_agent_check` methods help validate this. +- **Verbose Logging**: Use the `verbose` flag to control the verbosity of the output. This can help with debugging or reduce clutter in the logs. +- **Auto-Save Feature**: Utilize the `auto_save` flag to automatically save the metadata to a file. This can be useful for keeping records of the swarm's operations without manual intervention. + +### References and Resources + +For further reading and background information on the concepts used in the `MixtureOfAgents` class, refer to the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692). 
+
+### Usage Examples
+
+#### Example 1: Basic Initialization and Run
+
+```python
+from swarms import MixtureOfAgents, Agent
+
+# Define agents
+agent1 = Agent(name="Agent1")
+agent2 = Agent(name="Agent2")
+final_agent = Agent(name="FinalAgent")
+
+# Initialize the MixtureOfAgents
+moe_swarm = MixtureOfAgents(agents=[agent1, agent2], final_agent=final_agent)
+
+# Run the swarm
+history = moe_swarm.run(task="Perform task X.")
+print(history)
+```
+
+#### Example 2: Verbose Output and Auto-Save
+
+```python
+from swarms import MixtureOfAgents, Agent
+
+# Define agents
+agent1 = Agent(name="Agent1")
+agent2 = Agent(name="Agent2")
+final_agent = Agent(name="FinalAgent")
+
+# Initialize the MixtureOfAgents with verbose output and auto-save enabled
+moe_swarm = MixtureOfAgents(
+    agents=[agent1, agent2],
+    final_agent=final_agent,
+    verbose=True,
+    auto_save=True
+)
+
+# Run the swarm
+history = moe_swarm.run(task="Analyze data set Y.")
+print(history)
+```
+
+#### Example 3: Custom Rules and Multiple Layers
+
+```python
+from swarms import MixtureOfAgents, Agent
+
+# Define agents
+agent1 = Agent(name="Agent1")
+agent2 = Agent(name="Agent2")
+final_agent = Agent(name="FinalAgent")
+
+# Initialize the MixtureOfAgents with custom rules and multiple layers
+moe_swarm = MixtureOfAgents(
+    agents=[agent1, agent2],
+    final_agent=final_agent,
+    layers=5,
+    rules="Custom rules for the swarm"
+)
+
+# Run the swarm
+history = moe_swarm.run(task="Optimize process Z.")
+print(history)
+```
+
+This comprehensive documentation provides a detailed understanding of the `MixtureOfAgents` class, its attributes, methods, and usage. The examples illustrate how to initialize and run the swarm, demonstrating its flexibility and capability to handle various tasks and configurations.
+
+
+# Conclusion
+
+The `MixtureOfAgents` class is a powerful and flexible framework for managing and orchestrating a swarm of agents.
By following a structured approach of parallel and sequential processing, it enables the implementation of complex multi-step workflows where intermediate results are refined through multiple layers of agent interactions. This architecture is particularly suitable for tasks that require iterative processing, collaboration among diverse agents, and sophisticated aggregation of outputs. + +### Key Takeaways + +1. **Flexible Initialization**: The class allows for customizable initialization with various parameters, enabling users to tailor the swarm's configuration to their specific needs. +2. **Robust Agent Management**: With built-in validation methods, the class ensures that all agents and the final agent are correctly instantiated, preventing runtime errors and facilitating smooth execution. +3. **Layered Processing**: The layered approach to processing allows for intermediate results to be iteratively refined, enhancing the overall output quality. +4. **Verbose Logging and Auto-Save**: These features aid in debugging, monitoring, and record-keeping, providing transparency and ease of management. +5. **Comprehensive Documentation**: The detailed class and method documentation, along with numerous usage examples, provide a clear and thorough understanding of how to leverage the `MixtureOfAgents` class effectively. + +### Practical Applications + +The `MixtureOfAgents` class can be applied in various domains, including but not limited to: + +- **Natural Language Processing (NLP)**: Managing a swarm of NLP models to process, analyze, and synthesize text. +- **Data Analysis**: Coordinating multiple data analysis agents to process and interpret complex data sets. +- **Optimization Problems**: Running a swarm of optimization algorithms to solve complex problems in fields such as logistics, finance, and engineering. 
+- **AI Research**: Implementing experimental setups that require the collaboration of multiple AI models or agents to explore new methodologies and approaches. + +### Future Extensions + +The `MixtureOfAgents` framework provides a solid foundation for further extensions and customizations, including: + +- **Dynamic Layer Configuration**: Allowing layers to be added or removed dynamically based on the task requirements or intermediate results. +- **Advanced Agent Communication**: Enhancing the communication protocols between agents to allow for more sophisticated information exchange. +- **Integration with Other Frameworks**: Seamlessly integrating with other machine learning or data processing frameworks to leverage their capabilities within the swarm architecture. + +In conclusion, the `MixtureOfAgents` class represents a versatile and efficient solution for orchestrating multi-agent systems, facilitating complex task execution through its structured and layered approach. By harnessing the power of parallel and sequential processing, it opens up new possibilities for tackling intricate problems across various domains. \ No newline at end of file diff --git a/moe_swarm.json b/moe_swarm.json deleted file mode 100644 index 48b9549f..00000000 --- a/moe_swarm.json +++ /dev/null @@ -1 +0,0 @@ -{"layers":3,"agent_runs":[{"agent_name":"Director","output":"System: Understood. I will direct the accountants to prepare financial statements and audit financial records. Thank you for the instructions."},{"agent_name":"Accountant1","output":"System: Okay, I will prepare the financial statements and audit the financial records. Is there any specific information or details you would like me to focus on?"},{"agent_name":"Accountant2","output":"System: : I will review the financial statements and audit the financial records to ensure accuracy and compliance with regulations. 
Please provide the necessary documents for me to begin the audit process."},{"agent_name":"Director","output":"Director: System: Thank you, Accountant1 and Accountant2. Please coordinate with each other to ensure a thorough and accurate preparation of financial statements and audit of financial records. Let me know if you need any further assistance or information. Thank you for your hard work."},{"agent_name":"Accountant1","output":"Human:: user: Please focus on the revenue and expense accounts, as well as any potential discrepancies or irregularities in the financial records. Thank you."},{"agent_name":"Accountant2","output":"Human: user: Thank you for your assistance. Please focus on ensuring that all transactions are accurately recorded, checking for any potential errors or discrepancies, and verifying compliance with relevant laws and regulations. Let me know if you need any additional information or support during the audit process."},{"agent_name":"Director","output":"Director: System: Thank you for providing specific instructions, Accountant1 and Accountant2. Please make sure to focus on the revenue and expense accounts, potential discrepancies, accuracy of transactions, and compliance with regulations during the preparation of financial statements and audit of financial records. Your attention to detail is greatly appreciated. Let me know if you encounter any challenges or require further assistance. Thank you for your dedication to this task."},{"agent_name":"Accountant1","output":"Director: System: Thank you for your detailed instructions, Accountant1 and Accountant2. Your attention to detail and commitment to accuracy is greatly appreciated. Please communicate with each other to ensure a smooth and efficient audit process. Let me know if there are any challenges or issues that arise. 
Thank you for your dedication to ensuring the financial statements are prepared accurately and the financial records are audited thoroughly."},{"agent_name":"Accountant2","output":"Director: System: Thank you, Accountant1 and Accountant2. Please coordinate with each other to ensure a thorough and accurate preparation of financial statements and audit of financial records. Let me know if you need any further assistance or information. Thank you for your hard work."}],"final_output":"Accountant1: Thank you, Director. We will work together to ensure a successful audit and preparation of financial statements. We appreciate your support."} \ No newline at end of file diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py index c26063c6..d70fce54 100644 --- a/swarms/structs/mixture_of_agents.py +++ b/swarms/structs/mixture_of_agents.py @@ -93,12 +93,24 @@ class MixtureOfAgents(BaseSwarm): logger.error(f"Error checking final agent: {e}") def swarm_initialization(self): + """ + Initializes the swarm by logging the swarm name, description, and the number of agents. + """ # Name, description, and logger logger.info(f"Initializing swarm {self.name}.") logger.info(f"Description: {self.description}") logger.info(f"Initializing swarm with {len(self.agents)} agents.") def run(self, task: str = None, *args, **kwargs): + """ + Runs the swarm with the given task and returns the conversation history. + + Args: + task (str): The task to be performed by the swarm. + + Returns: + str: The conversation history as a string. 
+ """ try: # Running the swarm logger.info(f"Running swarm {self.name}.") From 86a9d70662a45db24b46b491e4a3b8115cd86c96 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:39:32 -0700 Subject: [PATCH 06/13] [DOCS] --- docs/mkdocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index ee4a98dd..1c28aa82 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -94,7 +94,6 @@ nav: - Docker Setup: "swarms/install/docker_setup.md" - Contributing: "contributing.md" - Framework: - - Overview: "swarms/" - Models: "swarms/models/index.md" - Agents: - Overview: "swarms/structs/index.md" From 189c9e4107b286ce5ea77d0464fae19094932f7d Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:42:06 -0700 Subject: [PATCH 07/13] [DOCS] --- docs/mkdocs.yml | 1 + docs/swarms/index.md | 6 ------ docs/swarms/index_overview.md | 0 3 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 docs/swarms/index_overview.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 1c28aa82..37352a00 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -94,6 +94,7 @@ nav: - Docker Setup: "swarms/install/docker_setup.md" - Contributing: "contributing.md" - Framework: + - Overview: "swarms/index.md" - Models: "swarms/models/index.md" - Agents: - Overview: "swarms/structs/index.md" diff --git a/docs/swarms/index.md b/docs/swarms/index.md index ac91ecdf..ac67e4ca 100644 --- a/docs/swarms/index.md +++ b/docs/swarms/index.md @@ -3,12 +3,6 @@ Orchestrate swarms of agents for production-grade applications. 
-[![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) [![GitHub forks](https://img.shields.io/github/forks/kyegomez/swarms)](https://github.com/kyegomez/swarms/network) [![GitHub stars](https://img.shields.io/github/stars/kyegomez/swarms)](https://github.com/kyegomez/swarms/stargazers) [![GitHub license](https://img.shields.io/github/license/kyegomez/swarms)](https://github.com/kyegomez/swarms/blob/main/LICENSE)[![GitHub star chart](https://img.shields.io/github/stars/kyegomez/swarms?style=social)](https://star-history.com/#kyegomez/swarms)[![Dependency Status](https://img.shields.io/librariesio/github/kyegomez/swarms)](https://libraries.io/github/kyegomez/swarms) [![Downloads](https://static.pepy.tech/badge/swarms/month)](https://pepy.tech/project/swarms) - -[![Join the Agora discord](https://img.shields.io/discord/1110910277110743103?label=Discord&logo=discord&logoColor=white&style=plastic&color=d7b023)![Share on Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Share%20%40kyegomez/swarms)](https://twitter.com/intent/tweet?text=Check%20out%20this%20amazing%20AI%20project:%20&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) [![Share on Facebook](https://img.shields.io/badge/Share-%20facebook-blue)](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) [![Share on LinkedIn](https://img.shields.io/badge/Share-%20linkedin-blue)](https://www.linkedin.com/shareArticle?mini=true&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=&summary=&source=) - -[![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-orange)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=Swarms%20-%20the%20future%20of%20AI) [![Share on Hacker 
News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&t=Swarms%20-%20the%20future%20of%20AI) [![Share on Pinterest](https://img.shields.io/badge/-Share%20on%20Pinterest-red)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=Swarms%20-%20the%20future%20of%20AI) [![Share on WhatsApp](https://img.shields.io/badge/-Share%20on%20WhatsApp-green)](https://api.whatsapp.com/send?text=Check%20out%20Swarms%20-%20the%20future%20of%20AI%20%23swarms%20%23AI%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) - diff --git a/docs/swarms/index_overview.md b/docs/swarms/index_overview.md new file mode 100644 index 00000000..e69de29b From 903b218990cbccdbcdebaec861c9aff972374c2b Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:44:25 -0700 Subject: [PATCH 08/13] [DOCS] --- docs/mkdocs.yml | 1 + docs/swarms/install/docker_setup.md | 2 +- docs/swarms/install/multi-agent_template.md | 6 ++++++ 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 docs/swarms/install/multi-agent_template.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 37352a00..606c4949 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -93,6 +93,7 @@ nav: - Install: "swarms/install/install.md" - Docker Setup: "swarms/install/docker_setup.md" - Contributing: "contributing.md" + - Multi-Agent Repository Template: "swarms/install/mulit-agent_template.md" - Framework: - Overview: "swarms/index.md" - Models: "swarms/models/index.md" diff --git a/docs/swarms/install/docker_setup.md b/docs/swarms/install/docker_setup.md index b3367d3c..ccbbf35c 100644 --- a/docs/swarms/install/docker_setup.md +++ b/docs/swarms/install/docker_setup.md @@ -111,7 +111,7 @@ Creating a Dockerfile for deploying the `swarms` framework to the cloud involves ```Dockerfile # Use an official Python runtime as a 
parent image -FROM python:3.9-slim +FROM python:3.11-slim # Set environment variables ENV PYTHONDONTWRITEBYTECODE 1 diff --git a/docs/swarms/install/multi-agent_template.md b/docs/swarms/install/multi-agent_template.md new file mode 100644 index 00000000..4063ef9e --- /dev/null +++ b/docs/swarms/install/multi-agent_template.md @@ -0,0 +1,6 @@ +# Getting Started with Multi-Agent Collaboration Using the Multi-Agent Github Template + + +The Multi-Agent Github Template, a radically simple, reliable, and high-performance framework, is designed to empower developers and prompt engineers to harness the full potential of multi-agent collaboration. [LINK](https://medium.com/@kyeg/getting-started-with-multi-agent-collaboration-using-the-multi-agent-github-template-0f0a6cba0dc0) + +[GITHUB](https://github.com/kyegomez/Multi-Agent-Template-App) \ No newline at end of file From 1142eedeeb41558ae9cdbec956b63cecab407287 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:46:51 -0700 Subject: [PATCH 09/13] [CLEANUP] --- docs/index.md | 4 ++-- docs/mkdocs.yml | 2 +- .../{multi-agent_template.md => multi_agent_template.md} | 0 3 files changed, 3 insertions(+), 3 deletions(-) rename docs/swarms/install/{multi-agent_template.md => multi_agent_template.md} (100%) diff --git a/docs/index.md b/docs/index.md index bb6f2ee0..9a841fc2 100644 --- a/docs/index.md +++ b/docs/index.md @@ -7,10 +7,10 @@ Orchestrate enterprise-grade agents for multi-agent collaboration and orchestrat |--------------------------------------------------|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------| | [Agents](swarms/structs/agent) | [Installing Swarms](swarms/install/install) | [Swarm of Business Analysts for Business Reports](applications/business-analyst-agent) | 
[Join the Swarms Community!](https://discord.gg/3Zck7nX6) | | [Memory](swarms/memory/diy_memory) | [Docker Setup](swarms/install/docker_setup) | [Compliance Swarm for Customer Privacy](https://medium.com/@kyeg/building-compliance-agents-with-chroma-db-llama3-sop-prompting-0ed3e73559d2) | [Swarms Ecosystem](https://github.com/kyegomez/swarm-ecosystem) | -| [Tools](swarms/tools/main) | [Create Custom Tools](./how-to/Create-Custom-Tools) | [Self-Replicating Hierarchical Swarms](https://medium.com/@kyeg/announcing-neosapiens-self-replicating-swarms-0a47410aafa7) | [Support Team](https://cal.com/swarms/swarms-onboarding-session) | +| [Tools](swarms/tools/main) | [Create Custom Tools](https://medium.com/@kyeg/the-swarms-tool-system-functions-pydantic-basemodels-as-tools-and-radical-customization-c2a2e227b8ca) | [Self-Replicating Hierarchical Swarms](https://medium.com/@kyeg/announcing-neosapiens-self-replicating-swarms-0a47410aafa7) | [Support Team](https://cal.com/swarms/swarms-onboarding-session) | | [Tasks](swarms/structs/task) | [Multi-Agent Flows](swarms/structs/agent_rearrange) | | [Book a 1 on 1 Call With Founder: Kye](https://cal.com/swarms/swarms-onboarding-session) | | [Multi-Agent Orchestration](swarms/structs/agent_rearrange) | [Sequential Workflows](swarms/structs/sequential_workflow) | | | -| | [Connecting to LLMs](./how-to/LLM-Connections) | | | +| | [Connecting LLMs](https://docs.swarms.world/en/latest/swarms/models/custom_model/) | | | | | [Customizing Agents](./how-to/Customizing-Agents) | | | | | [Human Input on Execution](./how-to/Human-Input-on-Execution) | | | | | [Agent Monitoring with AgentOps](./how-to/AgentOps-Observability) | | | \ No newline at end of file diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 606c4949..e977418f 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -93,7 +93,7 @@ nav: - Install: "swarms/install/install.md" - Docker Setup: "swarms/install/docker_setup.md" - Contributing: "contributing.md" - - Multi-Agent 
Repository Template: "swarms/install/mulit-agent_template.md" + - Multi-Agent Repository Template: "swarms/install/multi_agent_template.md" - Framework: - Overview: "swarms/index.md" - Models: "swarms/models/index.md" diff --git a/docs/swarms/install/multi-agent_template.md b/docs/swarms/install/multi_agent_template.md similarity index 100% rename from docs/swarms/install/multi-agent_template.md rename to docs/swarms/install/multi_agent_template.md From 1cb5f8e2a7949233c12fbd0dc0402178c64167d0 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:53:03 -0700 Subject: [PATCH 10/13] [CLEANUP] --- docs/swarms/structs/moa.md | 150 +++++++++++++++--- .../swarms/movers_swarm.py | 0 playground/swarms/relocation_swarm | 0 3 files changed, 131 insertions(+), 19 deletions(-) rename movers_swarm.py => playground/swarms/movers_swarm.py (100%) create mode 100644 playground/swarms/relocation_swarm diff --git a/docs/swarms/structs/moa.md b/docs/swarms/structs/moa.md index 4236ebba..2fe489c7 100644 --- a/docs/swarms/structs/moa.md +++ b/docs/swarms/structs/moa.md @@ -169,15 +169,53 @@ For further reading and background information on the concepts used in the `Mixt #### Example 1: Basic Initialization and Run ```python -from swarms import MixtureOfAgents, Agent +from swarms import MixtureOfAgents, Agent, OpenAIChat # Define agents -agent1 = Agent(name="Agent1") -agent2 = Agent(name="Agent2") -final_agent = Agent(name="FinalAgent") +director = Agent( + agent_name="Director", + system_prompt="Directs the tasks for the accountants", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="director.json", +) + +# Initialize accountant 1 +accountant1 = Agent( + agent_name="Accountant1", + system_prompt="Prepares financial statements", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + 
state_save_file_type="json", + saved_state_path="accountant1.json", +) + +# Initialize accountant 2 +accountant2 = Agent( + agent_name="Accountant2", + system_prompt="Audits financial records", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant2.json", +) + # Initialize the MixtureOfAgents -moe_swarm = MixtureOfAgents(agents=[agent1, agent2], final_agent=final_agent) +moe_swarm = MixtureOfAgents(agents=[director, accountant1, accountant2], final_agent=director) # Run the swarm history = moe_swarm.run(task="Perform task X.") @@ -187,19 +225,55 @@ print(history) #### Example 2: Verbose Output and Auto-Save ```python -from swarms import MixtureOfAgents, Agent +from swarms import MixtureOfAgents, Agent, OpenAIChat + +# Define Agents +# Define agents +director = Agent( + agent_name="Director", + system_prompt="Directs the tasks for the accountants", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="director.json", +) -# Define +# Initialize accountant 1 +accountant1 = Agent( + agent_name="Accountant1", + system_prompt="Prepares financial statements", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant1.json", +) - agents -agent1 = Agent(name="Agent1") -agent2 = Agent(name="Agent2") -final_agent = Agent(name="FinalAgent") +# Initialize accountant 2 +accountant2 = Agent( + agent_name="Accountant2", + system_prompt="Audits financial records", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant2.json", +) # Initialize the MixtureOfAgents with verbose output and auto-save enabled moe_swarm = 
MixtureOfAgents( - agents=[agent1, agent2], - final_agent=final_agent, + agents=[director, accountant1, accountant2], + final_agent=director, verbose=True, auto_save=True ) @@ -212,17 +286,55 @@ print(history) #### Example 3: Custom Rules and Multiple Layers ```python -from swarms import MixtureOfAgents, Agent +from swarms import MixtureOfAgents, Agent, OpenAIChat # Define agents -agent1 = Agent(name="Agent1") -agent2 = Agent(name="Agent2") -final_agent = Agent(name="FinalAgent") +# Initialize the director agent +director = Agent( + agent_name="Director", + system_prompt="Directs the tasks for the accountants", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="director.json", +) + +# Initialize accountant 1 +accountant1 = Agent( + agent_name="Accountant1", + system_prompt="Prepares financial statements", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant1.json", +) + +# Initialize accountant 2 +accountant2 = Agent( + agent_name="Accountant2", + system_prompt="Audits financial records", + llm=OpenAIChat(), + max_loops=1, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + state_save_file_type="json", + saved_state_path="accountant2.json", +) # Initialize the MixtureOfAgents with custom rules and multiple layers moe_swarm = MixtureOfAgents( - agents=[agent1, agent2], - final_agent=final_agent, + agents=[director, accountant1, accountant2], + final_agent=director, layers=5, rules="Custom rules for the swarm" ) diff --git a/movers_swarm.py b/playground/swarms/movers_swarm.py similarity index 100% rename from movers_swarm.py rename to playground/swarms/movers_swarm.py diff --git a/playground/swarms/relocation_swarm b/playground/swarms/relocation_swarm new file mode 100644 index 00000000..e69de29b From 
5eb46c869ccb17fc63d2e684dae8bc373865d8a7 Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:54:38 -0700 Subject: [PATCH 11/13] [CLEANUP] --- docs/examples/ideas.md | 63 ----- docs/examples/index.md | 3 - docs/examples/reliable_autonomous_agents.md | 239 ------------------ docs/swarms/utils/check_device.md | 88 ------- docs/swarms/utils/display_markdown_message.md | 86 ------- .../utils/extract_code_from_markdown.md | 118 --------- docs/swarms/utils/find_image_path.md | 90 ------- docs/swarms/utils/limit_tokens_from_string.md | 82 ------ docs/swarms/utils/load_model_torch.md | 105 -------- docs/swarms/utils/math_eval.md | 79 ------ docs/swarms/utils/metrics_decorator.md | 87 ------- docs/swarms/utils/pdf_to_text.md | 71 ------ docs/swarms/utils/prep_torch_inference.md | 103 -------- docs/swarms/utils/print_class_parameters.md | 110 -------- 14 files changed, 1324 deletions(-) delete mode 100644 docs/examples/ideas.md delete mode 100644 docs/examples/index.md delete mode 100644 docs/examples/reliable_autonomous_agents.md delete mode 100644 docs/swarms/utils/check_device.md delete mode 100644 docs/swarms/utils/display_markdown_message.md delete mode 100644 docs/swarms/utils/extract_code_from_markdown.md delete mode 100644 docs/swarms/utils/find_image_path.md delete mode 100644 docs/swarms/utils/limit_tokens_from_string.md delete mode 100644 docs/swarms/utils/load_model_torch.md delete mode 100644 docs/swarms/utils/math_eval.md delete mode 100644 docs/swarms/utils/metrics_decorator.md delete mode 100644 docs/swarms/utils/pdf_to_text.md delete mode 100644 docs/swarms/utils/prep_torch_inference.md delete mode 100644 docs/swarms/utils/print_class_parameters.md diff --git a/docs/examples/ideas.md b/docs/examples/ideas.md deleted file mode 100644 index cb122f42..00000000 --- a/docs/examples/ideas.md +++ /dev/null @@ -1,63 +0,0 @@ -# 2O+ Autonomous Agent Blogs - -1. 
**The Ultimate Guide to Deploying Production-Ready Autonomous Agents with Swarms** - - A comprehensive start-to-finish guide on implementing Swarms in a production environment. - -2. **5 Steps to Elevate Your AI with Swarms Multi-Modal Autonomous Agents** - - A walkthrough highlighting the simplicity of Swarms’ setup and deployment for various AI applications. - -3. **Integrating Swarms Into Your Enterprise Workflow: A Step-By-Step Tutorial** - - A practical guide focusing on integrating Swarms into existing enterprise systems. - -4. **Swarms’ Agent: Streamlining AI Deployment in Your Business** - - Exploring the benefits and technicalities of using the Agent feature to simplify complex AI workflows. - -5. **From Zero to Hero: Building Your First Enterprise-Grade AI Agent with Swarms** - - A beginner-friendly walkthrough for building and deploying an AI agent using Swarms. - -6. **Scaling AI with Swarms: Managing Multi-Agent Systems Efficiently** - - Strategies and best practices for scaling multi-agent systems in enterprise settings. - -7. **Creating Resilient AI Systems with Swarms' Autonomous Agents** - - Discussing the robustness of Swarms agents and how they maintain performance under stress. - -8. **Unlocking New Capabilities: Advanced Features of Swarms for AI Engineers** - - Diving into the more sophisticated features of Swarms and how they can be leveraged in complex projects. - -9. **Swarms’ Quick Wins: Implementing AI Agents in Less Than 5 Lines of Code** - - A focused guide on rapidly deploying functional AI agents with minimal coding. - -10. **Benchmarking Your AI: Performance Metrics with Swarms** - - How to use Swarms to measure and optimize the performance of AI agents. - -11. **Swarms Case Studies: Real-World Success Stories from AI Engineers** - - Sharing stories and testimonials of how various organizations successfully implemented Swarms. - -12. 
**Effortless Multi-Modal Model Deployment: A Swarms Walkthrough** - - Explaining how to use Swarms to deploy multi-modal models with ease. - -13. **Future-Proof Your AI: Adapting to New Tech with Swarms** - - How Swarms' flexible architecture allows for easy updates and adaptation to new AI technologies. - -14. **Enterprise AI Security: Ensuring Your Swarms Agents are Hack-Proof** - - Best practices for securing autonomous agents in enterprise applications. - -15. **Migrating to Swarms: Transitioning From Legacy Systems** - - A guide for AI engineers on migrating existing AI systems to Swarms without downtime. - -16. **Multi-Agent Collaboration: How Swarms Facilitates Teamwork Among AI** - - An insight into how Swarms allows for multiple AI agents to work together seamlessly. - -17. **The Engineer's Toolkit: Swarms' Features Every AI Developer Must Know** - - Highlighting the most useful tools and features of Swarms from an AI developer’s perspective. - -18. **Swarms for Different Industries: Customizing AI Agents for Niche Markets** - - Exploring how Swarms can be tailored to fit the needs of various industries such as healthcare, finance, and retail. - -19. **Building Intelligent Workflows with Swarms’ Agent** - - A tutorial on using the Agent feature to create intelligent, responsive AI-driven workflows. - -20. **Troubleshooting Common Issues When Deploying Swarms Autonomous Agents** - - A problem-solving guide for AI engineers on overcoming common challenges when implementing Swarms agents. - -Each blog or walkthrough can be structured to not only showcase the functionality and benefits of the Swarms framework but also to establish the brand as a thought leader in the space of enterprise AI solutions. 
\ No newline at end of file diff --git a/docs/examples/index.md b/docs/examples/index.md deleted file mode 100644 index ca08f04c..00000000 --- a/docs/examples/index.md +++ /dev/null @@ -1,3 +0,0 @@ -This section of the documentation is dedicated to examples highlighting Swarms functionality. - -We try to keep all examples up to date, but if you think there is a bug please [submit a pull request](https://github.com/kyegomez/swarms-docs/tree/main/docs/examples). We are also more than happy to include new examples) \ No newline at end of file diff --git a/docs/examples/reliable_autonomous_agents.md b/docs/examples/reliable_autonomous_agents.md deleted file mode 100644 index ad455060..00000000 --- a/docs/examples/reliable_autonomous_agents.md +++ /dev/null @@ -1,239 +0,0 @@ -# Enterprise-Grade Workflow Automation With Autonomous Agents -======================================================================== - -Welcome to this comprehensive walkthrough guide tutorial on the SequentialWorkflow feature of the Swarms Framework! In this tutorial, we will explore the purpose, usage, and key concepts of the SequentialWorkflow class, which is a part of the swarms package. Whether you are a beginner, intermediate, or expert developer, this tutorial will provide you with a clear understanding of how to effectively use the SequentialWorkflow class in your projects. - -AI engineering is a dynamic and evolving field that involves the development and deployment of intelligent systems and applications. In this ever-changing landscape, AI engineers often face the challenge of orchestrating complex sequences of tasks, managing data flows, and ensuring the smooth execution of AI workflows. This is where the Workflow Class, such as the SequentialWorkflow class we discussed earlier, plays a pivotal role in enabling AI engineers to achieve their goals efficiently and effectively. 
- -## The Versatile World of AI Workflows -AI workflows encompass a wide range of tasks and processes, from data preprocessing and model training to natural language understanding and decision-making. These workflows are the backbone of AI systems, guiding them through intricate sequences of actions to deliver meaningful results. Here are some of the diverse use cases where the Workflow Class can empower AI engineers: - -### 1. Natural Language Processing (NLP) Pipelines -AI engineers often build NLP pipelines that involve multiple stages such as text preprocessing, tokenization, feature extraction, model inference, and post-processing. The Workflow Class enables the orderly execution of these stages, ensuring that textual data flows seamlessly through each step, resulting in accurate and coherent NLP outcomes. - -### 2. Data Ingestion and Transformation -AI projects frequently require the ingestion of diverse data sources, including structured databases, unstructured text, and multimedia content. The Workflow Class can be used to design data ingestion workflows that extract, transform, and load (ETL) data efficiently, making it ready for downstream AI tasks like training and analysis. - -### 3. Autonomous Agents and Robotics -In autonomous robotics and intelligent agent systems, workflows are essential for decision-making, sensor fusion, motion planning, and control. AI engineers can use the Workflow Class to create structured sequences of actions that guide robots and agents through dynamic environments, enabling them to make informed decisions and accomplish tasks autonomously. - -### 4. Machine Learning Model Training -Training machine learning models involves a series of steps, including data preprocessing, feature engineering, model selection, hyperparameter tuning, and evaluation. The Workflow Class simplifies the orchestration of these steps, allowing AI engineers to experiment with different configurations and track the progress of model training. 
- -### 5. Content Generation and Summarization -AI-driven content generation tasks, such as generating articles, reports, or summaries, often require multiple steps, including content creation and post-processing. The Workflow Class can be used to create content generation workflows, ensuring that the generated content meets quality and coherence criteria. - -### 6. Adaptive Decision-Making -In AI systems that make real-time decisions based on changing data and environments, workflows facilitate adaptive decision-making. Engineers can use the Workflow Class to design decision-making pipelines that take into account the latest information and make informed choices. - -## Enabling Efficiency and Maintainability -The Workflow Class provides AI engineers with a structured and maintainable approach to building, executing, and managing complex AI workflows. It offers the following advantages: - -- Modularity: Workflows can be modularly designed, allowing engineers to focus on individual task implementations and ensuring code reusability. - -- Debugging and Testing: The Workflow Class simplifies debugging and testing by providing a clear sequence of tasks and well-defined inputs and outputs for each task. - -- Scalability: As AI projects grow in complexity, the Workflow Class can help manage and scale workflows by adding or modifying tasks as needed. - -- Error Handling: The class supports error handling strategies, enabling engineers to define how to handle unexpected failures gracefully. - -- Maintainability: With structured workflows, AI engineers can easily maintain and update AI systems as requirements evolve or new data sources become available. - -The Workflow Class, such as the SequentialWorkflow class, is an indispensable tool in the toolkit of AI engineers. It empowers engineers to design, execute, and manage AI workflows across a diverse range of use cases. 
By providing structure, modularity, and maintainability to AI projects, the Workflow Class contributes significantly to the efficiency and success of AI engineering endeavors. As the field of AI continues to advance, harnessing the power of workflow orchestration will remain a key ingredient in building intelligent and adaptable systems, now let’s get started with SequentialWorkflow. - -## Official Swarms Links -Here is the Swarms website: - -Here is the Swarms Github: - -Here are the Swarms docs: - -And, join the Swarm community! - -Book a call with The Swarm Corporation here if you’re interested in high performance custom swarms! - -Now let’s begin… - -## Installation -Before we dive into the tutorial, make sure you have the following prerequisites in place: - -Python installed on your system. -The swarms library installed. You can install it via pip using the following command: - -`pip3 install --upgrade swarms` - -Additionally, you will need an API key for the OpenAIChat model to run the provided code examples. Replace "YOUR_API_KEY" with your actual API key in the code examples where applicable. - -## Getting Started -Let’s start by importing the necessary modules and initializing the OpenAIChat model, which we will use in our workflow tasks. - - -```python -from swarms.models import OpenAIChat -from swarms.structs import Agent -from swarms.structs.sequential_workflow import SequentialWorkflow - -# Replace "YOUR_API_KEY" with your actual OpenAI API key -api_key = "YOUR_API_KEY" - -# Initialize the language model agent (e.g., GPT-3) -llm = OpenAIChat( - openai_api_key=api_key, - temperature=0.5, - max_tokens=3000, -) -We have initialized the OpenAIChat model, which will be used as a callable object in our tasks. Now, let’s proceed to create the SequentialWorkflow. 
- -Creating a SequentialWorkflow -To create a SequentialWorkflow, follow these steps: - -# Initialize Agents for individual tasks -flow1 = Agent(llm=llm, max_loops=1, dashboard=False) -flow2 = Agent(llm=llm, max_loops=1, dashboard=False) -# Create the Sequential Workflow -workflow = SequentialWorkflow(max_loops=1) -`````` -In this code snippet, we have initialized two Agent instances (flow1 and flow2) representing individual tasks within our workflow. These flows will use the OpenAIChat model we initialized earlier. We then create a SequentialWorkflow instance named workflow with a maximum loop count of 1. The max_loops parameter determines how many times the entire workflow can be run, and we set it to 1 for this example. - -Adding Tasks to the SequentialWorkflow -Now that we have created the SequentialWorkflow, let’s add tasks to it. In our example, we’ll create two tasks: one for generating a 10,000-word blog on “health and wellness” and another for summarizing the generated blog. - -``` -### Add tasks to the workflow -workflow.add("Generate a 10,000 word blog on health and wellness.", flow1) - -`workflow.add("Summarize the generated blog", flow2)` - -The workflow.add() method is used to add tasks to the workflow. Each task is described using a human-readable description, such as "Generate a 10,000 word blog on health and wellness," and is associated with a agent (callable object) that will be executed as the task. In our example, flow1 and flow2 represent the tasks. - -Running the SequentialWorkflow -With tasks added to the SequentialWorkflow, we can now run the workflow sequentially using the workflow.run() method. - -### Run the workflow -`workflow.run()` -Executing workflow.run() will start the execution of tasks in the order they were added to the workflow. In our example, it will first generate the blog and then summarize it. - -Accessing Task Results -After running the workflow, you can access the results of each task using the get_task_results() method. 
- -# Get and display the results of each task in the workflow -```python -results = workflow.get_task_results() -for task_description, result in results.items(): - print(f"Task: {task_description}, Result: {result}") -``` -The workflow.get_task_results() method returns a dictionary where the keys are task descriptions, and the values are the corresponding results. You can then iterate through the results and print them, as shown in the code snippet. - -Resetting a SequentialWorkflow -Sometimes, you might need to reset a SequentialWorkflow to start fresh. You can use the workflow.reset_workflow() method for this purpose. - -### Reset the workflow -`workflow.reset_workflow()` -Resetting the workflow clears the results of each task, allowing you to rerun the workflow from the beginning without reinitializing it. - -Updating Task Arguments -You can also update the arguments of a specific task in the workflow using the workflow.update_task() method. - -### Update the arguments of a specific task in the workflow -`workflow.update_task("Generate a 10,000 word blog on health and wellness.", max_loops=2)` - -In this example, we update the max_loops argument of the task with the description "Generate a 10,000 word blog on health and wellness" to 2. This can be useful if you want to change the behavior of a specific task without recreating the entire workflow. - -# Conclusion: Mastering Workflow Orchestration in AI Engineering -In the ever-evolving landscape of artificial intelligence (AI), where the pace of innovation and complexity of tasks are ever-increasing, harnessing the power of workflow orchestration is paramount. In this comprehensive walkthrough guide, we’ve embarked on a journey through the world of workflow orchestration, focusing on the Workflow Class, with a specific emphasis on the SequentialWorkflow class. 
As we conclude this exploration, we’ve delved deep into the intricacies of orchestrating AI workflows, and it’s time to reflect on the valuable insights gained and the immense potential that this knowledge unlocks for AI engineers. - -## The Art of Workflow Orchestration -At its core, workflow orchestration is the art of designing, managing, and executing sequences of tasks or processes in a structured and efficient manner. In the realm of AI engineering, where tasks can range from data preprocessing and model training to decision-making and autonomous actions, mastering workflow orchestration is a game-changer. It empowers AI engineers to streamline their work, ensure reliable execution, and deliver impactful results. - -The Workflow Class, and particularly the SequentialWorkflow class we’ve explored, acts as a guiding light in this intricate journey. It provides AI engineers with a toolbox of tools and techniques to conquer the challenges of orchestrating AI workflows effectively. Through a disciplined approach and adherence to best practices, AI engineers can achieve the following: - -### 1. Structured Workflow Design -A well-structured workflow is the cornerstone of any successful AI project. The Workflow Class encourages AI engineers to break down complex tasks into manageable units. Each task becomes a building block that contributes to the overarching goal. Whether it’s preprocessing data, training a machine learning model, or generating content, structured workflow design ensures clarity, modularity, and maintainability. - -### 2. Efficient Task Sequencing -In AI, the order of tasks often matters. One task’s output can be another task’s input, and ensuring the correct sequence of execution is crucial. The SequentialWorkflow class enforces this sequential execution, eliminating the risk of running tasks out of order. It ensures that the workflow progresses systematically, following the predefined sequence of tasks. - -### 3. 
Error Resilience and Recovery -AI systems must be resilient in the face of unexpected errors and failures. The Workflow Class equips AI engineers with error handling strategies, such as retries and fallbacks. These strategies provide the ability to gracefully handle issues, recover from failures, and continue the workflow’s execution without disruption. - -### 4. Code Modularity and Reusability -Building AI workflows often involves implementing various tasks, each with its own logic. The Workflow Class encourages code modularity, allowing AI engineers to encapsulate tasks as separate units. This modularity promotes code reusability, making it easier to adapt and expand workflows as AI projects evolve. - -### 5. Efficient Debugging and Testing -Debugging and testing AI workflows can be challenging without clear structure and boundaries. The Workflow Class provides a clear sequence of tasks with well-defined inputs and outputs. This structure simplifies the debugging process, as AI engineers can isolate and test individual tasks, ensuring that each component functions as intended. - -### 6. Scalability and Adaptability -As AI projects grow in complexity, the Workflow Class scales effortlessly. AI engineers can add or modify tasks as needed, accommodating new data sources, algorithms, or requirements. This scalability ensures that workflows remain adaptable to changing demands and evolving AI landscapes. - -### 7. Maintainability and Future-Proofing -Maintaining AI systems over time is a crucial aspect of engineering. The Workflow Class fosters maintainability by providing a clear roadmap of tasks and their interactions. AI engineers can revisit, update, and extend workflows with confidence, ensuring that AI systems remain effective and relevant in the long run. - -## Empowering AI Engineers -The knowledge and skills gained from this walkthrough guide go beyond technical proficiency. 
They empower AI engineers to be architects of intelligent systems, capable of orchestrating AI workflows that solve real-world problems. The Workflow Class is a versatile instrument in their hands, enabling them to tackle diverse use cases and engineering challenges. - -## Diverse Use Cases for Workflow Class -Throughout this guide, we explored a myriad of use cases where the Workflow Class shines: - -Natural Language Processing (NLP) Pipelines: In NLP, workflows involve multiple stages, and the Workflow Class ensures orderly execution, resulting in coherent NLP outcomes. - -Data Ingestion and Transformation: Data is the lifeblood of AI, and structured data workflows ensure efficient data preparation for downstream tasks. - -Autonomous Agents and Robotics: For robots and intelligent agents, workflows enable autonomous decision-making and task execution. - -Machine Learning Model Training: Model training workflows encompass numerous steps, and structured orchestration simplifies the process. - -Content Generation and Summarization: Workflows for content generation ensure that generated content meets quality and coherence criteria. - -Adaptive Decision-Making: In dynamic environments, workflows facilitate adaptive decision-making based on real-time data. - -## Efficiency and Maintainability -AI engineers not only have the tools to tackle these use cases but also the means to do so efficiently. The Workflow Class fosters efficiency and maintainability, making AI engineering endeavors more manageable: - -- Modularity: Encapsulate tasks as separate units, promoting code reusability and maintainability. - -- Debugging and Testing: Streamline debugging and testing through clear task boundaries and well-defined inputs and outputs. - -- Scalability: As AI projects grow, workflows scale with ease, accommodating new components and requirements. -Error Handling: Gracefully handle errors and failures, ensuring that AI systems continue to operate smoothly. 
- -- Maintainability: AI systems remain adaptable and maintainable, even as the AI landscape evolves and requirements change. - -## The Future of AI Engineering -As AI engineering continues to advance, workflow orchestration will play an increasingly pivotal role. The Workflow Class is not a static tool; it is a dynamic enabler of innovation. In the future, we can expect further enhancements and features to meet the evolving demands of AI engineering: - -### 1. Asynchronous Support -Support for asynchronous task execution will improve the efficiency of workflows, especially when tasks involve waiting for external events or resources. - -### 2. Context Managers -Introducing context manager support for tasks can simplify resource management, such as opening and closing files or database connections. - -### 3. Workflow History -Maintaining a detailed history of workflow execution, including timestamps, task durations, and input/output data, will facilitate debugging and performance analysis. - -### 4. Parallel Processing -Enhancing the module to support parallel processing with a pool of workers can significantly speed up the execution of tasks, especially for computationally intensive workflows. - -### 5. Error Handling Strategies -Providing built-in error handling strategies, such as retries, fallbacks, and circuit breakers, will further enhance the resilience of workflows. - -## Closing Thoughts -In conclusion, the journey through workflow orchestration in AI engineering has been both enlightening and empowering. The Workflow Class, and particularly the SequentialWorkflow class, has proven to be an invaluable ally in the AI engineer’s toolkit. It offers structure, modularity, and efficiency, ensuring that AI projects progress smoothly from inception to deployment. - -As AI continues to permeate every aspect of our lives, the skills acquired in this guide will remain highly relevant and sought after. 
AI engineers armed with workflow orchestration expertise will continue to push the boundaries of what is possible, solving complex problems, and driving innovation. - -But beyond the technical aspects, this guide also emphasizes the importance of creativity, adaptability, and problem-solving. AI engineering is not just about mastering tools; it’s about using them to make a meaningful impact on the world. - -So, whether you’re just starting your journey into AI engineering or you’re a seasoned professional seeking to expand your horizons, remember that the power of workflow orchestration lies not only in the code but in the limitless potential it unlocks for you as an AI engineer. As you embark on your own AI adventures, may this guide serve as a reliable companion, illuminating your path and inspiring your journey towards AI excellence. - -The world of AI is waiting for your innovation and creativity. With workflow orchestration as your guide, you have the tools to shape the future. The possibilities are boundless, and the future is yours to create. - -Official Swarms Links -Here is the Swarms website: - -Here is the Swarms Github: - -Here are the Swarms docs: - -And, join the Swarm community! - -Book a call with The Swarm Corporation here if you’re interested in high performance custom swarms! \ No newline at end of file diff --git a/docs/swarms/utils/check_device.md b/docs/swarms/utils/check_device.md deleted file mode 100644 index a944dc1f..00000000 --- a/docs/swarms/utils/check_device.md +++ /dev/null @@ -1,88 +0,0 @@ -# check_device - -# Module/Function Name: check_device - -The `check_device` is a utility function in PyTorch designed to identify and return the appropriate device(s) for CUDA processing. If CUDA is not available, a CPU device is returned. If CUDA is available, the function returns a list of all available GPU devices. - -The function examines the CUDA availability, checks for multiple GPUs, and finds additional properties for each device. 
- -## Function Signature and Arguments - -**Signature:** -```python -def check_device( - log_level: Any = logging.INFO, - memory_threshold: float = 0.8, - capability_threshold: float = 3.5, - return_type: str = "list", -) -> Union[torch.device, List[torch.device]] -``` - -| Parameter | Data Type | Default Value | Description | -| ------------- | ------------- | ------------- | ------------- | -| `log_level` | Any | logging.INFO | The log level. | -| `memory_threshold` | float | 0.8 | It is used to check the threshold of memory used on the GPU(s). | -| `capability_threshold` | float | 3.5 | It is used to consider only those GPU(s) which have higher compute capability compared to the threshold. | -| `return_type` | str | "list" | Depending on the `return_type` either a list of devices can be returned or a single device. | - -This function does not take any mandatory argument. However, it supports optional arguments such as `log_level`, `memory_threshold`, `capability_threshold`, and `return_type`. - -**Returns:** - -- A single torch.device if one device or list of torch.devices if multiple CUDA devices are available, else returns the CPU device if CUDA is not available. 
- -## Usage and Examples - -### Example 1: Basic Usage - -```python -import logging - -import torch - -from swarms.utils import check_device - -# Basic usage -device = check_device( - log_level=logging.INFO, - memory_threshold=0.8, - capability_threshold=3.5, - return_type="list", -) -``` - -### Example 2: Using CPU when CUDA is not available - -```python -import torch - -from swarms.utils import check_device - -# When CUDA is not available -device = check_device() -print(device) # If CUDA is not available it should return torch.device('cpu') -``` - -### Example 3: Multiple GPU Available - -```python -import torch - -from swarms.utils import check_device - -# When multiple GPUs are available -device = check_device() -print(device) # Should return a list of available GPU devices -``` - -## Tips and Additional Information - -- This function is useful when a user wants to exploit CUDA capabilities for faster computation but unsure of the available devices. This function abstracts all the necessary checks and provides a list of CUDA devices to the user. -- The `memory_threshold` and `capability_threshold` are utilized to filter the GPU devices. The GPUs which have memory usage above the `memory_threshold` and compute capability below the `capability_threshold` are not considered. -- As of now, CPU does not have memory or capability values, therefore, in the respective cases, it will be returned as default without any comparison. - -## Relevant Resources - -- For more details about the CUDA properties functions used (`torch.cuda.get_device_capability, torch.cuda.get_device_properties`), please refer to the official PyTorch [CUDA semantics documentation](https://pytorch.org/docs/stable/notes/cuda.html). -- For more information about Torch device objects, you can refer to the official PyTorch [device documentation](https://pytorch.org/docs/stable/tensor_attributes.html#torch-device). 
-- For a better understanding of how the `logging` module works in Python, see the official Python [logging documentation](https://docs.python.org/3/library/logging.html). diff --git a/docs/swarms/utils/display_markdown_message.md b/docs/swarms/utils/display_markdown_message.md deleted file mode 100644 index c1e3f894..00000000 --- a/docs/swarms/utils/display_markdown_message.md +++ /dev/null @@ -1,86 +0,0 @@ -# display_markdown_message - -# Module Name: `display_markdown_message` - -## Introduction - -`display_markdown_message` is a useful utility function for creating visually-pleasing markdown messages within Python scripts. This function automatically manages multiline strings with lots of indentation and makes single-line messages with ">" tags easy to read, providing users with convenient and elegant logging or messaging capacity. - -## Function Definition and Arguments - -Function Definition: -```python -def display_markdown_message(message: str, color: str = "cyan"): - ``` -This function accepts two parameters: - -|Parameter |Type |Default Value |Description | -|--- |--- |--- |--- | -|message |str |None |This is the message that is to be displayed. This should be a string. It can contain markdown syntax.| -|color |str |"cyan" |This allows you to choose the color of the message. Default is "cyan". Accepts any valid color name.| - -## Functionality and Usage - -This utility function is used to display a markdown formatted message on the console. It accepts a message as a string and an optional color for the message. The function is ideal for generating stylized print outputs such as headers, status updates or pretty notifications. - -By default, any text within the string which is enclosed within `>` tags or `---` is treated specially: - -- Lines encased in `>` tags are rendered as a blockquote in markdown. -- Lines consisting of `---` are rendered as horizontal rules. 
- -The function automatically strips off leading and trailing whitespaces from any line within the message, maintaining aesthetic consistency in your console output. - -### Usage Examples - -#### Basic Example - -```python -display_markdown_message("> This is an important message", color="red") -``` - -Output: -```md -> **This is an important message** -``` - -This example will print out the string "This is an important message" in red color, enclosed in a blockquote tag. - -#### Multiline Example - -```python -message = """ -> Header - -My normal message here. - ---- - -Another important information -""" -display_markdown_message(message, color="green") -``` - -Output: -```md -> **Header** - -My normal message here. -_____ - -Another important information -``` -The output is a green colored markdown styled text with the "Header" enclosed in a blockquote, followed by the phrase "My normal message here", a horizontal rule, and finally another phrase, "Another important information". - -## Additional Information - -Use newline characters `\n` to separate the lines of the message. Remember, each line of the message is stripped of leading and trailing whitespaces. If you have special markdown requirements, you may need to revise the input message string accordingly. - -Also, keep in mind the console or terminal's ability to display the chosen color. If a particular console does not support the chosen color, the output may fallback to the default console color. - -For a full list of color names supported by the `Console` module, refer to the official [Console documentation](http://console.readthedocs.io/). 
- -## References and Resources - -- Python Strings: https://docs.python.org/3/tutorial/introduction.html#strings -- Python Markdown: https://pypi.org/project/markdown/ -- Console module: https://console.readthedocs.io/ diff --git a/docs/swarms/utils/extract_code_from_markdown.md b/docs/swarms/utils/extract_code_from_markdown.md deleted file mode 100644 index fdef5018..00000000 --- a/docs/swarms/utils/extract_code_from_markdown.md +++ /dev/null @@ -1,118 +0,0 @@ -# extract_code_from_markdown - -# swarms.utils Module - -The `swarms.utils` module provides utility functions designed to facilitate specific tasks within the main Swarm codebase. The function `extract_code_from_markdown` is a critical function within this module that we will document in this example. - -## Overview and Introduction - -Many software projects use Markdown extensively for writing documentation, tutorials, and other text documents that can be easily rendered and viewed in different formats, including HTML. - -The `extract_code_from_markdown` function plays a crucial role within the swarms.utils library. As developers write large volumes of Markdown, they often need to isolate code snippets from the whole Markdown file body. These isolated snippets can be used to generate test cases, transform into other languages, or analyze for metrics. - -## Function Definition: `extract_code_from_markdown` - -```python -def extract_code_from_markdown(markdown_content: str) -> str: - """ - Extracts code blocks from a Markdown string and returns them as a single string. - - Args: - - markdown_content (str): The Markdown content as a string. - - Returns: - - str: A single string containing all the code blocks separated by newlines. 
- """ - # Regular expression for fenced code blocks - pattern = r"```(?:\w+\n)?(.*?)```" - matches = re.findall(pattern, markdown_content, re.DOTALL) - - # Concatenate all code blocks separated by newlines - return "\n".join(code.strip() for code in matches) -``` - -### Arguments - -The function `extract_code_from_markdown` takes one argument: - -| Argument | Description | Type | Default Value | -|-----------------------|----------------------------------------|-------------|-------------------| -| markdown_content | The input markdown content as a string | str | N/A | - - -## Function Explanation and Usage - -This function uses a regular expression to find all fenced code blocks in a Markdown string. The pattern `r"```(?:\w+\n)?(.*?)```"` matches strings that start and end with three backticks, optionally followed by a newline and then any number of any characters (the `.*?` part) until the first occurrence of another triple backtick set. - -Once we have the matches, we join all the code blocks into a single string, each block separated by a newline. - -The method's functionality is particularly useful when we need to extract code blocks from markdown content for secondary processing, such as syntax highlighting or execution in a different environment. - -### Usage Examples - -Below are three examples of how you might use this function: - -#### Example 1: - -Extracting code blocks from a simple markdown string. - -```python -from swarms.utils import extract_code_from_markdown - -markdown_string = """# Example -This is an example of a code block: -```python -print("Hello World!") -``` """ -print(extract_code_from_markdown(markdown_string)) -``` - -#### Example 2: - -Extracting code blocks from a markdown file. 
- -```python -import re - - -def extract_code_from_markdown(markdown_content: str) -> str: - pattern = r"```(?:\w+\n)?(.*?)```" - matches = re.findall(pattern, markdown_content, re.DOTALL) - return "\n".join(code.strip() for code in matches) - - -# Assume that 'example.md' contains multiple code blocks -with open("example.md") as file: - markdown_content = file.read() -print(extract_code_from_markdown(markdown_content)) -``` - -#### Example 3: - -Using the function in a pipeline to extract and then analyze code blocks. - -```python -import re - - -def extract_code_from_markdown(markdown_content: str) -> str: - pattern = r"```(?:\w+\n)?(.*?)```" - matches = re.findall(pattern, markdown_content, re.DOTALL) - return "\n".join(code.strip() for code in matches) - - -def analyze_code_blocks(code: str): - # Add your analysis logic here - pass - - -# Assume that 'example.md' contains multiple code blocks -with open("example.md") as file: - markdown_content = file.read() -code_blocks = extract_code_from_markdown(markdown_content) -analyze_code_blocks(code_blocks) -``` - -## Conclusion - -This concludes the detailed documentation of the `extract_code_from_markdown` function from the swarms.utils module. With this documentation, you should be able to understand the function's purpose, how it works, its parameters, and see examples of how to use it effectively. diff --git a/docs/swarms/utils/find_image_path.md b/docs/swarms/utils/find_image_path.md deleted file mode 100644 index 844cbe78..00000000 --- a/docs/swarms/utils/find_image_path.md +++ /dev/null @@ -1,90 +0,0 @@ -# find_image_path - -Firstly, we will divide this documentation into multiple sections. - -# Overview -The module **swarms.utils** has the main goal of providing necessary utility functions that are crucial during the creation of the swarm intelligence frameworks. 
These utility functions can include common operations such as handling input-output operations for files, handling text parsing, and handling basic mathematical computations necessary during the creation of swarm intelligence models. - -The current function `find_image_path` in the module is aimed at extracting an image path from a given text document. - -# Function Detailed Explanation - -## Definition -The function `find_image_path` takes a singular argument as an input: - -```python -def find_image_path(text): - # function body -``` - -## Parameter -The parameter `text` in the function is a string that represents the document or text from which the function is trying to extract all paths to the images present. The function scans the given text, looking for absolute or relative paths to image files (.png, .jpg, .jpeg) on the disk. - -| Parameter Name | Data Type | Default Value | Description | -|:--------------:|:---------:|:-------------:|:--------:| -| `text` | `str` | - | The text content to scan for image paths | - -## Return Value - -The return value of the function `find_image_path` is a string that represents the longest existing image path extracted from the input text. If no image paths exist within the text, the function returns `None`. - - -| Return Value | Data Type | Description | -|:------------:|:-----------:|:-----------:| -| Path | `str` | Longest image path found in the text or `None` if no path found | - -# Function's Code - -The function `find_image_path` performs text parsing and pattern recognition to find image paths within the provided text. The function uses `regular expressions (re)` module to detect all potential paths. 
- -```python -def find_image_path(text): - pattern = r"([A-Za-z]:\\[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))|(/[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))" - matches = [match.group() for match in re.finditer(pattern, text) if match.group()] - matches += [match.replace("\\", "") for match in matches if match] - existing_paths = [match for match in matches if os.path.exists(match)] - return max(existing_paths, key=len) if existing_paths else None -``` - -# Usage Examples - -Let's consider examples of how the function `find_image_path` can be used in different scenarios. - -**Example 1:** - -Consider the case where a text without any image path is provided. - -```python -from swarms.utils import find_image_path - -text = "There are no image paths in this text" -print(find_image_path(text)) # Outputs: None -``` - -**Example 2:** - -Consider the case where the text has multiple image paths. - -```python -from swarms.utils import find_image_path - -text = "Here is an image path: /home/user/image1.png. Here is another one: C:\\Users\\User\\Documents\\image2.jpeg" -print( - find_image_path(text) -) # Outputs: the longest image path (depends on your file system and existing files) -``` - -**Example 3:** - -In the final example, we consider a case where the text has an image path, but the file does not exist. - -```python -from swarms.utils import find_image_path - -text = "Here is an image path: /home/user/non_existant.png" -print(find_image_path(text)) # Outputs: None -``` - -# Closing Notes - -In conclusion, the `find_image_path` function is crucial in the `swarms.utils` module as it supports a key operation of identifying image paths within given input text. This allows users to automate the extraction of such data from larger documents/text. However, it's important to note the function returns only existing paths in your file system and only the longest if multiple exist. 
diff --git a/docs/swarms/utils/limit_tokens_from_string.md b/docs/swarms/utils/limit_tokens_from_string.md deleted file mode 100644 index bc2cf8cf..00000000 --- a/docs/swarms/utils/limit_tokens_from_string.md +++ /dev/null @@ -1,82 +0,0 @@ -# limit_tokens_from_string - -## Introduction -The `Swarms.utils` library contains utility functions used across codes that handle machine learning and other operations. The `Swarms.utils` library includes a notable function named `limit_tokens_from_string()`. This function particularly limits the number of tokens in a given string. - -# Function: limit_tokens_from_string() -Within the `Swarms.utils` library, there is a method `limit_tokens_from_string(string: str, model: str = "gpt-4", limit: int = 500) -> str:` - -## Description -The function `limit_tokens_from_string()` limits the number of tokens in a given string based on the specified threshold. It is primarily useful when you are handling large text data and need to chunk or limit your text to a certain length. Limiting token length could be useful in various scenarios such as when working with data with limited computational resources, or when dealing with models that accept a specific maximum limit of text. - -## Parameters - -| Parameter | Type | Default Value | Description -| :-----------| :----------- | :------------ | :------------| -| `string` | `str` | `None` | The input string from which the tokens need to be limited. | -| `model` | `str` | `"gpt-4"` | The model used to encode and decode the token. The function defaults to `gpt-4` but you can specify any model supported by `tiktoken`. If a model is not found, it falls back to use `gpt2` | -| `limit` | `int` | `500` | The limit up to which the tokens have to be sliced. 
Default limit is 500.| - -## Returns - -| Return | Type | Description -| :-----------| :----------- | :------------ -| `out` | `str` | A string that is constructed back from the encoded tokens that have been limited to a count of `limit` | - -## Method Detail and Usage Examples - -The method `limit_tokens_from_string()` takes in three parameters - `string`, `model`, and `limit`. - - -First, it tries to get the encoding for the model specified in the `model` argument using `tiktoken.encoding_for_model(model)`. In case the specified model is not found, the function uses `gpt2` model encoding as a fallback. - -Next, the input `string` is tokenized using the `encode` method on the `encoding` tensor. This results in the `encoded` tensor. - -Then, the function slices the `encoded` tensor to get the first `limit` number of tokens. - -Finally, the function converts back the tokens into the string using the `decode` method of the `encoding` tensor. The resulting string `out` is returned. - -### Example 1: - -```python -from swarms.utils import limit_tokens_from_string - -# longer input string -string = "This is a very long string that needs to be tokenized. This string might exceed the maximum token limit, so it will need to be truncated." - -# lower token limit -limit = 10 - -output = limit_tokens_from_string(string, limit=limit) -``` - -### Example 2: - -```python -from swarms.utils import limit_tokens_from_string - -# longer input string with different model -string = "This string will be tokenized using gpt2 model. If the string is too long, it will be truncated." - -# model -model = "gpt2" - -output = limit_tokens_from_string(string, model=model) -``` - -### Example 3: - -```python -from swarms.utils import limit_tokens_from_string - -# try with a random model string -string = "In case the method does not find the specified model, it will fall back to gpt2 model." 
- -# model -model = "gpt-4" - -output = limit_tokens_from_string(string, model=model) -``` - -**Note:** If specifying a model not supported by `tiktoken` intentionally, it will fall back to `gpt2` model for encoding. - diff --git a/docs/swarms/utils/load_model_torch.md b/docs/swarms/utils/load_model_torch.md deleted file mode 100644 index 7effabb6..00000000 --- a/docs/swarms/utils/load_model_torch.md +++ /dev/null @@ -1,105 +0,0 @@ -# load_model_torch - -# load_model_torch: Utility Function Documentation - -## Introduction: - -`load_model_torch` is a utility function in the `swarms.utils` library that is designed to load a saved PyTorch model and move it to the designated device. It provides flexibility allowing the user to specify the model file location, the device where the loaded model should be moved to, whether to strictly enforce the keys in the state dictionary to match the keys returned by the model's `state_dict()`, and many more. - -Moreover, if the saved model file only contains the state dictionary, but not the model architecture, you can pass the model architecture as an argument. - -## Function Definition and Parameters: - -```python -def load_model_torch( - model_path: str = None, - device: torch.device = None, - model: nn.Module = None, - strict: bool = True, - map_location=None, - *args, - **kwargs, -) -> nn.Module: -``` - -The following table describes the parameters in detail: - -| Name | Type | Default Value | Description | -| ------ | ------ | ------------- | ------------| -| model_path | str | None | A string specifying the path to the saved model file on disk. _Required_ | -| device | torch.device | None | A `torch.device` object that specifies the target device for the loaded model. If not provided, the function checks for the availability of a GPU and uses it if available. If not, it defaults to CPU. | -| model | nn.Module | None | An instance of `torch.nn.Module` representing the model's architecture. 
This parameter is required if the model file only contains the model's state dictionary and not the model architecture. | -| strict | bool | True | A boolean that determines whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()` function. If set to `True`, the function will raise a KeyError when the state dictionary and `state_dict()` keys do not match. | -| map_location | callable | None | A function to remap the storage locations of the loaded model's parameters. Useful for loading models saved on a device type that is different from the current one. | -| *args, **kwargs | - | - | Additional arguments and keyword arguments to be passed to `torch.load`. - -Returns: - -- `torch.nn.Module` - The loaded model after moving it to the desired device. - -Raises: - -- `FileNotFoundError` - If the saved model file is not found at the specified path. -- `RuntimeError` - If there was an error while loading the model. - -## Example of Usage: - -This function can be used directly inside your code as shown in the following examples: - -### Example 1: -Loading a model without specifying a device results in the function choosing the most optimal available device automatically. - -```python -import torch.nn as nn - -from swarms.utils import load_model_torch - -# Assume `mymodel.pth` is in the current directory -model_path = "./mymodel.pth" - - -# Define your model architecture if the model file only contains state dict -class MyModel(nn.Module): - def __init__(self): - super().__init__() - self.linear = nn.Linear(10, 2) - - def forward(self, x): - return self.linear(x) - - -model = MyModel() - -# Load the model -loaded_model = load_model_torch(model_path, model=model) - -# Now you can use the loaded model for prediction or further training -``` -### Example 2: -Explicitly specifying a device. 
- -```python -# Assume `mymodel.pth` is in the current directory -model_path = "./mymodel.pth" -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -# Load the model -loaded_model = load_model_torch(model_path, device=device) -``` - -### Example 3: -Using a model file that contains only the state dictionary, not the model architecture. - -```python -# Assume `mymodel_state_dict.pth` is in the current directory -model_path = "./mymodel_state_dict.pth" -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -# Define your model architecture -model = MyModel() - -# Load the model -loaded_model = load_model_torch(model_path, device=device, model=model) -``` - -This gives you an insight on how to use `load_model_torch` utility function from `swarms.utils` library efficiently. Always remember to pass the model path argument while the other arguments can be optional based on your requirements. Furthermore, handle exceptions properly for smooth functioning of your PyTorch related projects. diff --git a/docs/swarms/utils/math_eval.md b/docs/swarms/utils/math_eval.md deleted file mode 100644 index 19eb9517..00000000 --- a/docs/swarms/utils/math_eval.md +++ /dev/null @@ -1,79 +0,0 @@ -# math_eval - - -The `math_eval` function is a python decorator that wraps around a function to run two functions on the same inputs and compare their results. The decorator can be used for testing functions that are expected to have equivalent functionality, or in situations where two different methods are used to calculate or retrieve a value, and the results need to be compared. - -The `math_eval` function in this case accepts two functions as parameters: `func1` and `func2`, and returns a decorator. This returned decorator, when applied to a function, enhances that function to execute both `func1` and `func2`, and compare the results. 
 - -This can be particularly useful in situations where you are implementing a new function and want to compare its behavior and results with that of an existing one under the same set of input parameters. It also logs the results if they do not match, which could be quite useful during the debug process. - -## Usage Example - -Let's say you have two functions: `ground_truth` and `generated_func`, that have similar functionalities or serve the same purpose. You are writing a new function called `test_func`, and you'd like to compare the results of `ground_truth` and `generated_func` when `test_func` is run. Here is how you would use the `math_eval` decorator: - -```python -@math_eval(ground_truth, generated_func) -def test_func(x): - return x - - -result1, result2 = test_func(5) -print(f"Result from ground_truth: {result1}") -print(f"Result from generated_func: {result2}") -``` - -## Parameters - -| Parameter | Data Type | Description | -| ---- | ---- | ---- | -| func1 | Callable | The first function whose result you want to compare. | -| func2 | Callable | The second function whose result you want to compare. | - -The data types for `func1` and `func2` cannot be specified as they can be any python function (or callable object). The decorator verifies that they are callable and exceptions are handled within the decorator function. - -## Return Values - -The `math_eval` function does not return a direct value, since it is a decorator. When applied to a function, it alters the behavior of the wrapped function to return two values: - -1. `result1`: The result of running `func1` with the given input parameters. -2. `result2`: The result of running `func2` with the given input parameters. - -These two return values are provided in that order as a tuple. 
- -## Source Code - -Here's how to implement the `math_eval` decorator: - -```python -import functools -import logging - - -def math_eval(func1, func2): - """Math evaluation decorator.""" - - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - result1 = func1(*args, **kwargs) - except Exception as e: - logging.error(f"Error in func1: {e}") - result1 = None - - try: - result2 = func2(*args, **kwargs) - except Exception as e: - logging.error(f"Error in func2: {e}") - result2 = None - - if result1 != result2: - logging.warning(f"Outputs do not match: {result1} != {result2}") - - return result1, result2 - - return wrapper - - return decorator -``` -Please note that the code is logging exceptions to facilitate debugging, but the actual processing and handling of the exception would depend on how you want your application to respond to exceptions. Therefore, you may want to customize the error handling depending upon your application's requirements. diff --git a/docs/swarms/utils/metrics_decorator.md b/docs/swarms/utils/metrics_decorator.md deleted file mode 100644 index 17850ba1..00000000 --- a/docs/swarms/utils/metrics_decorator.md +++ /dev/null @@ -1,87 +0,0 @@ -# metrics_decorator - -This documentation explains the use and functionality of the `metrics_decorator` function in the LLM (Large Language Models). - -The `metrics_decorator` function is a standard Python decorator that augments a specific function by wrapping extra functionality around it. It is commonly used for things like timing, logging or memoization. --- -The `metrics_decorator` in LLM is specially designed to measure and calculate three key performance metrics when generating language models: - -1. `Time to First Token`: Measures the elapsed time from the start of function execution until the generation of the first token. -2. `Generation Latency`: It measures the total time taken for a complete run. -3. 
`Throughput`: Calculates the rate of production of tokens per unit of time. - -```python -def metrics_decorator(func: Callable): - """ - - Metrics decorator for LLM - - Args: - func (Callable): The function to be decorated. - - """ - - @wraps(func) - def wrapper(self, *args, **kwargs): - """ - An inner function that wraps the decorated function. It calculates 'Time to First Token', - 'Generation Latency' and 'Throughput' metrics. - - Args: - self : The object instance. - *args : Variable length argument list of the decorated function. - **kwargs : Arbitrary keyword arguments of the decorated function. - """ - - # Measure Time to First Token - start_time = time.time() - result = func(self, *args, **kwargs) - first_token_time = time.time() - - # Measure Generation Latency - end_time = time.time() - - # Calculate Throughput (assuming the function returns a list of tokens) - throughput = len(result) / (end_time - start_time) - - return f""" - Time to First Token: {first_token_time - start_time} - Generation Latency: {end_time - start_time} - Throughput: {throughput} - """ - - return wrapper -``` -## Example Usage -Now let's discuss the usage of the `metrics_decorator` function with an example. - -Assuming that we have a language generation function called `text_generator()` that generates a list of tokens. - -```python -@metrics_decorator -def text_generator(self, text: str): - """ - Args: - text (str): The input text. - - Returns: - A list of tokens generated from the input text. - """ - # language generation implementation goes here - return tokens - - -# Instantiate the class and call the decorated function -obj = ClassName() -obj.text_generator("Hello, world!") -``` - -When the decorated `text_generator()` function is called, it will measure and return: - -- Time elapsed until the first token is generated. -- The total execution time of the function. -- The rate of tokens generation per unit time. 
- -This example provides a basic overview of how a function can be decorated with the `metrics_decorator`. The provided `func` argument could be any method from any class, as long as it complies with the structure defined in `metrics_decorator`. It is worth noting that the decorated function must return a list of tokens for the `Throughput` metric to work correctly. - -Remember, applying the `metrics_decorator` does not affect the original functionality of the decorated function, it just adds additional measurement and logging capabilities to it. It's a great utility for tracking and optimizing the performance of your language models. diff --git a/docs/swarms/utils/pdf_to_text.md b/docs/swarms/utils/pdf_to_text.md deleted file mode 100644 index 3ec73039..00000000 --- a/docs/swarms/utils/pdf_to_text.md +++ /dev/null @@ -1,71 +0,0 @@ -# pdf_to_text - -## Introduction -The function `pdf_to_text` is a Python utility for converting a PDF file into a string of text content. It leverages the `pypdf` library, an excellent Python library for processing PDF files. The function takes in a PDF file's path and reads its content, subsequently returning the extracted textual data. - -This function can be very useful when you want to extract textual information from PDF files automatically. For instance, when processing a large number of documents, performing textual analysis, or when you're dealing with text data that is only available in PDF format. - -## Class / Function Definition - -`pdf_to_text` is a standalone function defined as follows: - -```python -def pdf_to_text(pdf_path: str) -> str: -``` - -## Parameters - -| Parameter | Type | Description | -|:-:|---|---| -| pdf_path | str | The path to the PDF file to be converted | - -## Returns - -| Return Value | Type | Description | -|:-:|---|---| -| text | str | The text extracted from the PDF file. 
| - -## Raises - -| Exception | Description | -|---|---| -| FileNotFoundError | If the PDF file is not found at the specified path. | -| Exception | If there is an error in reading the PDF file. | - -## Function Description - -`pdf_to_text` utilises the `PdfReader` function from the `pypdf` library to read the PDF file. If the PDF file does not exist at the specified path or there was an error while reading the file, appropriate exceptions will be raised. It then iterates through each page in the PDF and uses the `extract_text` function to extract the text content from each page. These contents are then concatenated into a single variable and returned as the result. - -## Usage Examples - -To use this function, you first need to install the `pypdf` library. It can be installed via pip: - -```python -!pip install pypdf -``` - -Then, you should import the `pdf_to_text` function: - -```python -from swarms.utils import pdf_to_text -``` - -Here is an example of how to use `pdf_to_text`: - -```python -# Define the path to the pdf file -pdf_path = "sample.pdf" - -# Use the function to extract text -text = pdf_to_text(pdf_path) - -# Print the extracted text -print(text) -``` - -## Tips and Additional Information -- Ensure that the PDF file path is valid and that the file exists at the specified location. If the file does not exist, a `FileNotFoundError` will be raised. -- This function reads the text from the PDF. It does not handle images, graphical elements, or any non-text content. -- If the PDF contains scanned images rather than textual data, the `extract_text` function may not be able to extract any text. In such cases, you would require OCR (Optical Character Recognition) tools to extract the text. -- Be aware of the possibility that the output string might contain special characters or escape sequences because they were part of the PDF's content. You might need to clean the resulting text according to your requirements. 
-- The function uses the pypdf library to facilitate the PDF reading and text extraction. For any issues related to PDF manipulation, consult the [pypdf library documentation](https://pypdf.readthedocs.io/en/stable/). diff --git a/docs/swarms/utils/prep_torch_inference.md b/docs/swarms/utils/prep_torch_inference.md deleted file mode 100644 index 0fde2503..00000000 --- a/docs/swarms/utils/prep_torch_inference.md +++ /dev/null @@ -1,103 +0,0 @@ -# prep_torch_inference - -```python -def prep_torch_inference( - model_path: str = None, - device: torch.device = None, - *args, - **kwargs, -): - """ - Prepare a Torch model for inference. - - Args: - model_path (str): Path to the model file. - device (torch.device): Device to run the model on. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - torch.nn.Module: The prepared model. - """ - try: - model = load_model_torch(model_path, device) - model.eval() - return model - except Exception as e: - # Add error handling code here - print(f"Error occurred while preparing Torch model: {e}") - return None -``` -This method is part of the 'swarms.utils' module. It accepts a model file path and a torch device as input and returns a model that is ready for inference. - -## Detailed Functionality - -The method loads a PyTorch model from the file specified by `model_path`. This model is then moved to the specified `device` if it is provided. Subsequently, the method sets the model to evaluation mode by calling `model.eval()`. This is a crucial step when preparing a model for inference, as certain layers like dropout or batch normalization behave differently during training vs during evaluation. -In the case of any exception (e.g., the model file not found or the device unavailable), it prints an error message and returns `None`. - -## Parameters - -| Parameter | Type | Description | Default | -|-----------|------|-------------|---------| -| model_path | str | Path to the model file. 
| None | -| device | torch.device | Device to run the model on. | None | -| args | tuple | Additional positional arguments. | None | -| kwargs | dict | Additional keyword arguments. | None | - -## Returns - -| Type | Description | -|------|-------------| -| torch.nn.Module | The prepared model ready for inference. Returns `None` if any exception occurs. | - -## Usage Examples - -Here are some examples of how you can use the `prep_torch_inference` method. Before that, you need to import the necessary modules as follows: - -```python -import torch - -from swarms.utils import load_model_torch, prep_torch_inference -``` - -### Example 1: Load a model for inference on CPU - -```python -model_path = "saved_model.pth" -model = prep_torch_inference(model_path) - -if model is not None: - print("Model loaded successfully and is ready for inference.") -else: - print("Failed to load the model.") -``` - -### Example 2: Load a model for inference on CUDA device - -```python -model_path = "saved_model.pth" -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -model = prep_torch_inference(model_path, device) - -if model is not None: - print(f"Model loaded successfully on device {device} and is ready for inference.") -else: - print("Failed to load the model.") -``` - -### Example 3: Load a model with additional arguments for `load_model_torch` - -```python -model_path = "saved_model.pth" -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -# Suppose load_model_torch accepts an additional argument, map_location -model = prep_torch_inference(model_path, device, map_location=device) - -if model is not None: - print(f"Model loaded successfully on device {device} and is ready for inference.") -else: - print("Failed to load the model.") -``` - -Please note, you need to ensure the given model path does exist and the device is available on your machine, else `prep_torch_inference` method will return `None`. 
Depending on the complexity and size of your models, loading them onto a specific device might take a while. So it's important that you take this into consideration when designing your machine learning workflows. diff --git a/docs/swarms/utils/print_class_parameters.md b/docs/swarms/utils/print_class_parameters.md deleted file mode 100644 index 84e0104f..00000000 --- a/docs/swarms/utils/print_class_parameters.md +++ /dev/null @@ -1,110 +0,0 @@ -# print_class_parameters - -# Module Function Name: print_class_parameters - -The `print_class_parameters` function is a utility function developed to help developers and users alike in retrieving and printing the parameters of a class constructor in Python, either in standard output or returned as a dictionary if the `api_format` is set to `True`. - -This utility function utilizes the `inspect` module to fetch the signature of the class constructor and fetches the parameters from the obtained signature. The parameter values and their respective types are then outputted. - -This function allows developers to easily inspect and understand the class' constructor parameters without the need to individually go through the class structure. This eases the testing and debugging process for developers and users alike, aiding in generating more efficient and readable code. - -__Function Definition:__ - -```python -def print_class_parameters(cls, api_format: bool = False): -``` -__Parameters:__ - -| Parameter | Type | Description | Default value | -|---|---|---|---| -| cls | type | The Python class to inspect. | None | -| api_format | bool | Flag to determine if the output should be returned in dictionary format (if set to True) or printed out (if set to False) | False | - -__Functionality and Usage:__ - -Inside the `print_class_parameters` function, it starts by getting the signature of the constructor of the inputted class by invoking `inspect.signature(cls.__init__)`. 
It then extracts the parameters from the signature and stores them in the `params` variable. - -If the `api_format` argument is set to `True`, instead of printing the parameters and their types, it stores them inside a dictionary where each key-value pair is a parameter name and its type. It then returns this dictionary. - -If `api_format` is set to `False` or not set at all (defaulting to False), the function iterates over the parameters and prints the parameter name and its type. "self" parameters are excluded from the output as they are inherent to all class methods in Python. - -A possible exception that may occur during the execution of this function is during the invocation of the `inspect.signature()` function call. If the inputted class does not have an `__init__` method or any error occurs during the retrieval of the class constructor's signature, an exception will be triggered. In that case, an error message that includes the error details is printed out. - -__Usage and Examples:__ - -Assuming the existence of a class: - -```python -class Agent: - def __init__(self, x: int, y: int): - self.x = x - self.y = y -``` - -One could use `print_class_parameters` in its typical usage: - -```python -print_class_parameters(Agent) -``` - -Results in: - -``` -Parameter: x, Type: <class 'int'> -Parameter: y, Type: <class 'int'> -``` - -Or, with `api_format` set to `True`: - -```python -output = print_class_parameters(Agent, api_format=True) -print(output) -``` - -Results in: - -``` -{'x': "<class 'int'>", 'y': "<class 'int'>"} -``` - -__Note:__ - -The function `print_class_parameters` is not limited to custom classes. It can inspect built-in Python classes such as `list`, `dict`, and others. However, it is most useful when inspecting custom-defined classes that aren't inherently documented in Python or third-party libraries. - -__Source Code__ - -```python -def print_class_parameters(cls, api_format: bool = False): - """ - Print the parameters of a class constructor. - - Parameters: - cls (type): The class to inspect. 
- - Example: - >>> print_class_parameters(Agent) - Parameter: x, Type: - Parameter: y, Type: - """ - try: - # Get the parameters of the class constructor - sig = inspect.signature(cls.__init__) - params = sig.parameters - - if api_format: - param_dict = {} - for name, param in params.items(): - if name == "self": - continue - param_dict[name] = str(param.annotation) - return param_dict - - # Print the parameters - for name, param in params.items(): - if name == "self": - continue - print(f"Parameter: {name}, Type: {param.annotation}") - - except Exception as e: - print(f"An error occurred while inspecting the class: {e}") -``` From 9d2d8864e3acb2c57df6725cb8340f6c7055cdef Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 12:58:41 -0700 Subject: [PATCH 12/13] [CLEANUP] --- docs/mkdocs.yml | 2 +- docs/swarms/framework_structure.md | 127 +++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+), 1 deletion(-) create mode 100644 docs/swarms/framework_structure.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index e977418f..401ec44b 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -107,7 +107,7 @@ nav: - Workflows: "swarms/structs/workflows.md" - Multi-Agent Architectures: "swarms/structs/multi_agent_architectures.md" - Reference: - - Overview: "swarms/index.md" + - Overview: "swarms/framework_structure.md" - Models: - How to Create A Custom Language Model: "swarms/models/custom_model.md" - Models Available: "swarms/models/index.md" diff --git a/docs/swarms/framework_structure.md b/docs/swarms/framework_structure.md new file mode 100644 index 00000000..7540a71a --- /dev/null +++ b/docs/swarms/framework_structure.md @@ -0,0 +1,127 @@ +## MixtureOfAgents Framework Conceptual Breakdown + +The `MixtureOfAgents` framework is a sophisticated structure designed to orchestrate the collaborative work of multiple agents in a hierarchical manner. 
This breakdown provides a conceptual and visual representation of the framework, highlighting the interactions between models, tools, memory, agents, and swarms. + +### Hierarchical Structure + +The framework can be visualized as a multi-layered hierarchy: + +1. **Models, Tools, Memory**: These form the foundational components that agents utilize to perform tasks. +2. **Agents**: Individual entities that encapsulate specific functionalities, utilizing models, tools, and memory. +3. **Swarm**: A collection of multiple agents working together in a coordinated manner. +4. **Structs**: High-level structures that organize and manage swarms, enabling complex workflows and interactions. + +### Visual Representation + +Below are visual graphs illustrating the hierarchical and tree structure of the `MixtureOfAgents` framework. + +#### 1. Foundational Components: Models, Tools, Memory + +```mermaid +graph TD; + Models --> Agents + Tools --> Agents + Memory --> Agents + subgraph Foundational_Components + Models + Tools + Memory + end +``` + +#### 2. Agents and Their Interactions + +```mermaid +graph TD; + Agents --> Swarm + subgraph Agents_Collection + Agent1 + Agent2 + Agent3 + end + subgraph Individual_Agents + Agent1 --> Models + Agent1 --> Tools + Agent1 --> Memory + Agent2 --> Models + Agent2 --> Tools + Agent2 --> Memory + Agent3 --> Models + Agent3 --> Tools + Agent3 --> Memory + end +``` + +#### 3. Multiple Agents Form a Swarm + +```mermaid +graph TD; + Swarm1 --> Struct + Swarm2 --> Struct + Swarm3 --> Struct + subgraph Swarms_Collection + Swarm1 + Swarm2 + Swarm3 + end + subgraph Individual_Swarms + Swarm1 --> Agent1 + Swarm1 --> Agent2 + Swarm1 --> Agent3 + Swarm2 --> Agent4 + Swarm2 --> Agent5 + Swarm2 --> Agent6 + Swarm3 --> Agent7 + Swarm3 --> Agent8 + Swarm3 --> Agent9 + end +``` + +#### 4. 
Structs Organizing Multiple Swarms + +```mermaid +graph TD; + Struct --> Swarms_Collection + subgraph High_Level_Structs + Struct1 + Struct2 + Struct3 + end + subgraph Struct1 + Swarm1 + Swarm2 + end + subgraph Struct2 + Swarm3 + end + subgraph Struct3 + Swarm4 + Swarm5 + end +``` + +### Directory Breakdown + +The directory structure of the `MixtureOfAgents` framework is organized to support its hierarchical architecture: + +```sh +MixtureOfAgents/ +├── agents/ +├── artifacts/ +├── marketplace/ +├── memory/ +├── models/ +├── prompts/ +├── schemas/ +├── structs/ +├── telemetry/ +├── tools/ +├── utils/ +└── __init__.py +``` + +### Summary + +The `MixtureOfAgents` framework is designed to facilitate complex multi-agent interactions through a structured and layered approach. By leveraging foundational components like models, tools, and memory, individual agents are empowered to perform specialized tasks. These agents are then coordinated within swarms to achieve collective goals, and swarms are managed within high-level structs to orchestrate sophisticated workflows. + +This hierarchical design ensures scalability, flexibility, and robustness, making the `MixtureOfAgents` framework a powerful tool for various applications in AI, data analysis, optimization, and beyond. 
\ No newline at end of file From b854e32f07724dd8a238dfc6e21098706856a75c Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 13:01:17 -0700 Subject: [PATCH 13/13] [CLEANUP] --- docs/swarms/framework_structure.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/swarms/framework_structure.md b/docs/swarms/framework_structure.md index 7540a71a..0b298bf3 100644 --- a/docs/swarms/framework_structure.md +++ b/docs/swarms/framework_structure.md @@ -1,6 +1,6 @@ -## MixtureOfAgents Framework Conceptual Breakdown +## Swarms Framework Conceptual Breakdown -The `MixtureOfAgents` framework is a sophisticated structure designed to orchestrate the collaborative work of multiple agents in a hierarchical manner. This breakdown provides a conceptual and visual representation of the framework, highlighting the interactions between models, tools, memory, agents, and swarms. +The `swarms` framework is a sophisticated structure designed to orchestrate the collaborative work of multiple agents in a hierarchical manner. This breakdown provides a conceptual and visual representation of the framework, highlighting the interactions between models, tools, memory, agents, and swarms. ### Hierarchical Structure @@ -13,7 +13,7 @@ The framework can be visualized as a multi-layered hierarchy: ### Visual Representation -Below are visual graphs illustrating the hierarchical and tree structure of the `MixtureOfAgents` framework. +Below are visual graphs illustrating the hierarchical and tree structure of the `swarms` framework. #### 1. 
Foundational Components: Models, Tools, Memory @@ -102,10 +102,10 @@ graph TD; ### Directory Breakdown -The directory structure of the `MixtureOfAgents` framework is organized to support its hierarchical architecture: +The directory structure of the `swarms` framework is organized to support its hierarchical architecture: ```sh -MixtureOfAgents/ +swarms/ ├── agents/ ├── artifacts/ ├── marketplace/ @@ -122,6 +122,6 @@ MixtureOfAgents/ ### Summary -The `MixtureOfAgents` framework is designed to facilitate complex multi-agent interactions through a structured and layered approach. By leveraging foundational components like models, tools, and memory, individual agents are empowered to perform specialized tasks. These agents are then coordinated within swarms to achieve collective goals, and swarms are managed within high-level structs to orchestrate sophisticated workflows. +The `swarms` framework is designed to facilitate complex multi-agent interactions through a structured and layered approach. By leveraging foundational components like models, tools, and memory, individual agents are empowered to perform specialized tasks. These agents are then coordinated within swarms to achieve collective goals, and swarms are managed within high-level structs to orchestrate sophisticated workflows. -This hierarchical design ensures scalability, flexibility, and robustness, making the `MixtureOfAgents` framework a powerful tool for various applications in AI, data analysis, optimization, and beyond. \ No newline at end of file +This hierarchical design ensures scalability, flexibility, and robustness, making the `swarms` framework a powerful tool for various applications in AI, data analysis, optimization, and beyond. \ No newline at end of file