From f2ac193e3fd67723917a31b3a4242671b3d4c77e Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Tue, 11 Jun 2024 11:49:01 -0700 Subject: [PATCH] [demo] --- docs/mkdocs.yml | 23 +- .../structs/multi_agent_orchestration.md | 15 + playground/demos/octomology_swarm/api.py | 115 +- playground/weatherman_agent/.env.example | 5 + playground/weatherman_agent/.gitignore | 204 +++ playground/weatherman_agent/README.md | 112 ++ playground/weatherman_agent/api.py | 119 ++ .../weatherman_agent/datasets/examples.csv | 40 + .../datasets/rain_weather_prompts.csv | 26 + .../datasets/weatherman_agent_LLM_prompts.csv | 26 + .../weatherman_agent/docs/llama3_hosted.md | 112 ++ .../weatherman_agent/docs/weather_agent.md | 113 ++ .../examples/baron_tool_with_swarms_tool.py | 30 + .../examples/llama_3_hosted_swarms.py | 19 + .../weatherman_agent/examples/llama_agent.py | 34 + .../weatherman_agent/examples/tool_schemas.py | 35 + playground/weatherman_agent/pyproject.toml | 55 + playground/weatherman_agent/requirements.txt | 18 + .../weatherman_agent/scripts/Dockerfile | 28 + playground/weatherman_agent/scripts/setup.sh | 0 .../tests/test_baron_tools.py | 56 + .../weatherman_agent/tests/test_llama3.py | 41 + .../tests/tests_weather_agent.py | 161 +++ .../weatherman_agent/todo/director_agent.py | 279 ++++ .../weatherman_agent/todo/worker_agents.py | 269 ++++ playground/weatherman_agent/weather_agent.py | 50 + .../weather_swarm/__init__.py | 0 .../weatherman_agent/weather_swarm/prompts.py | 152 ++ .../weather_swarm/tools/__init__.py | 0 .../weather_swarm/tools/baron_tools_schema.py | 145 ++ .../tools/get_geo_coordinates.py | 109 ++ .../weather_swarm/tools/tools.py | 1281 +++++++++++++++++ swarms/structs/concat.py | 24 + swarms/structs/mixture_of_agents.py | 159 ++ 34 files changed, 3784 insertions(+), 71 deletions(-) create mode 100644 docs/swarms/structs/multi_agent_orchestration.md create mode 100644 playground/weatherman_agent/.env.example create mode 100644 playground/weatherman_agent/.gitignore create mode 100644 playground/weatherman_agent/README.md create mode 100644 playground/weatherman_agent/api.py create mode 100644 playground/weatherman_agent/datasets/examples.csv create mode 100644 playground/weatherman_agent/datasets/rain_weather_prompts.csv create mode 100644 playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv create mode 100644 playground/weatherman_agent/docs/llama3_hosted.md create mode 100644 playground/weatherman_agent/docs/weather_agent.md create mode 100644 playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py create mode 100644 playground/weatherman_agent/examples/llama_3_hosted_swarms.py create mode 100644 playground/weatherman_agent/examples/llama_agent.py create mode 100644 playground/weatherman_agent/examples/tool_schemas.py create mode 100644 playground/weatherman_agent/pyproject.toml create mode 100644 playground/weatherman_agent/requirements.txt create mode 100644 playground/weatherman_agent/scripts/Dockerfile create mode 100644 playground/weatherman_agent/scripts/setup.sh create mode 100644 playground/weatherman_agent/tests/test_baron_tools.py create mode 100644 playground/weatherman_agent/tests/test_llama3.py create mode 100644 playground/weatherman_agent/tests/tests_weather_agent.py create mode 100644 playground/weatherman_agent/todo/director_agent.py create mode 100644 playground/weatherman_agent/todo/worker_agents.py create mode 100644 playground/weatherman_agent/weather_agent.py create mode 100644 playground/weatherman_agent/weather_swarm/__init__.py 
create mode 100644 playground/weatherman_agent/weather_swarm/prompts.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/__init__.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py create mode 100644 playground/weatherman_agent/weather_swarm/tools/tools.py create mode 100644 swarms/structs/concat.py create mode 100644 swarms/structs/mixture_of_agents.py diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 18c6a0a8..3afa0044 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -1,4 +1,3 @@ - docs_dir: '.' # replace with the correct path if your documentation files are not in the same directory as mkdocs.yml site_name: Swarms Documentation site_url: https://swarms.apac.ai @@ -90,10 +89,22 @@ markdown_extensions: - footnotes nav: - Home: - - Overview: "index.md" - - Install: "swarms/install/install.md" - - Docker Setup: "swarms/install/docker_setup.md" + - Overview: "index.md" + - Install: "swarms/install/install.md" + - Docker Setup: "swarms/install/docker_setup.md" + - Contributing: "contributing.md" - Framework: + - Overview: "swarms/" + - Models: "swarms/models/index.md" + - Agents: + - Build Agents: "swarms/structs/diy_your_own_agent.md" + - Agents with Memory: "swarms/memory/diy_memory.md" + - Agents with tools: "swarms/tools/main.md" + # - Integrating Agents from Langchain, CrewAI, and Autogen: "swarms" + - Multi-Agent Collaboration: + - Overview: "swarms/structs/multi_agent_orchestration.md" + - Workflows: "swarms/structs/workflows.md" + - Reference: - Overview: "swarms/index.md" - Models: - How to Create A Custom Language Model: "swarms/models/custom_model.md" @@ -157,6 +168,4 @@ nav: - SequentialWorkflow: "examples/reliable_autonomous_agents.md" - References: - Agent Glossary: "swarms/glossary.md" - - List of The Best Multi-Agent Papers: "swarms/papers.md" - - Contributors: - - Contributing: "contributing.md" + - List of The Best Multi-Agent Papers: "swarms/papers.md" \ No newline at end of file diff --git a/docs/swarms/structs/multi_agent_orchestration.md b/docs/swarms/structs/multi_agent_orchestration.md new file mode 100644 index 00000000..80dedff3 --- /dev/null +++ b/docs/swarms/structs/multi_agent_orchestration.md @@ -0,0 +1,15 @@ +# Multi-Agent Orchestration: +Swarms was designed to faciliate the communication between many different and specialized agents from a vast array of other frameworks such as langchain, autogen, crew, and more. + +In traditional swarm theory, there are many types of swarms usually for very specialized use-cases and problem sets. Such as Hiearchical and sequential are great for accounting and sales, because there is usually a boss coordinator agent that distributes a workload to other specialized agents. + + +| **Name** | **Description** | **Code Link** | **Use Cases** | +|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------|---------------------------------------------------------------------------------------------------| +| Hierarchical Swarms | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. 
| [Code Link](#) | Manufacturing process optimization, multi-level sales management, healthcare resource coordination | +| Agent Rearrange | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing | +| Concurrent Workflows | Agents perform different tasks simultaneously, coordinating to complete a larger goal. | [Code Link](#) | Concurrent production lines, parallel sales operations, simultaneous patient care processes | +| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows | +| Parallel Processing | Agents work on different parts of a task simultaneously to speed up the overall process. | [Code Link](#) | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests | + + diff --git a/playground/demos/octomology_swarm/api.py b/playground/demos/octomology_swarm/api.py index d826b4e4..203ba051 100644 --- a/playground/demos/octomology_swarm/api.py +++ b/playground/demos/octomology_swarm/api.py @@ -1,13 +1,11 @@ import os from dotenv import load_dotenv -from fastapi.responses import JSONResponse from pydantic import BaseModel, Field from swarms import Agent from swarms.models import OpenAIChat from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.structs.rearrange import AgentRearrange -from fastapi import FastAPI from typing import Optional, List, Dict, Any # Load the environment variables @@ -25,7 +23,7 @@ openai = OpenAIChat( # Setup the FastAPI app -app = FastAPI() +# app = FastAPI() def DIAGNOSIS_SYSTEM_PROMPT() -> str: @@ -120,64 +118,53 @@ class RunConfig(BaseModel): max_loops: Optional[int] = 1 -@app.get("/v1/health") -async def health_check(): - return JSONResponse(content={"status": "healthy"}) - - -@app.get("/v1/models_available") -async def models_available(): - available_models = { - "models": [ - {"name": "gpt-4-1106-vision-preview", "type": "vision"}, - {"name": "openai-chat", "type": "text"}, - ] - } - return JSONResponse(content=available_models) - - -@app.get("/v1/swarm/completions") -async def run_agents(run_config: RunConfig): - # Diagnoser agent - diagnoser = Agent( - # agent_name="Medical Image Diagnostic Agent", - agent_name="D", - system_prompt=DIAGNOSIS_SYSTEM_PROMPT(), - llm=llm, - max_loops=1, - autosave=True, - dashboard=True, - ) - - # Agent 2 the treatment plan provider - treatment_plan_provider = Agent( - # agent_name="Medical Treatment Recommendation Agent", - agent_name="T", - system_prompt=TREATMENT_PLAN_SYSTEM_PROMPT(), - llm=openai, - max_loops=1, - autosave=True, - dashboard=True, - ) - - # Agent 3 the re-arranger - rearranger = AgentRearrange( - agents=[diagnoser, treatment_plan_provider], - flow=run_config.flow, - max_loops=run_config.max_loops, - verbose=True, - ) - - # Run the rearranger - out = rearranger( - run_config.task, - image=run_config.image, - ) - - return JSONResponse(content=out) - - -if __name__ == "__main__": - import uvicorn - - uvicorn.run(app, host="0.0.0.0", port=8000) +# @app.get("/v1/health") +# async def health_check(): +# return JSONResponse(content={"status": "healthy"}) + + +# 
@app.get("/v1/models_available") +# async def models_available(): +# available_models = { +# "models": [ +# {"name": "gpt-4-1106-vision-preview", "type": "vision"}, +# {"name": "openai-chat", "type": "text"}, +# ] +# } +# return JSONResponse(content=available_models) + + +# @app.get("/v1/swarm/completions") +# async def run_agents(run_config: RunConfig): +# Diagnoser agent +diagnoser = Agent( + # agent_name="Medical Image Diagnostic Agent", + agent_name="D", + system_prompt=DIAGNOSIS_SYSTEM_PROMPT(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=True, +) + +# Agent 2 the treatment plan provider +treatment_plan_provider = Agent( + # agent_name="Medical Treatment Recommendation Agent", + agent_name="T", + system_prompt=TREATMENT_PLAN_SYSTEM_PROMPT(), + llm=openai, + max_loops=1, + autosave=True, + dashboard=True, +) + +# Agent 3 the re-arranger +rearranger = AgentRearrange( + agents=[diagnoser, treatment_plan_provider], + flow="D -> T", + max_loops=1, + verbose=True, +) + +# Run the agents +results = rearranger.run("") diff --git a/playground/weatherman_agent/.env.example b/playground/weatherman_agent/.env.example new file mode 100644 index 00000000..0fa1a6b7 --- /dev/null +++ b/playground/weatherman_agent/.env.example @@ -0,0 +1,5 @@ +ANTHROPIC_API_KEY="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" +SWARMS_API_KEY="GET YOUR KEY AT https://swarms.world/account" +BARON_API_HOST="http://api.velocityweather.com/v1" +BARON_ACCESS_KEY="Y5lHXZfgce7P" +BARON_ACCESS_KEY_SECRET="rcscpInzyLuweENUjUtFDmqLkK1N0EPeaWQRjy7er1"] \ No newline at end of file diff --git a/playground/weatherman_agent/.gitignore b/playground/weatherman_agent/.gitignore new file mode 100644 index 00000000..97476ea2 --- /dev/null +++ b/playground/weatherman_agent/.gitignore @@ -0,0 +1,204 @@ +__pycache__/ +.venv/ + +.env + +image/ +audio/ +video/ +dataframe/ + +static/generated +runs +chroma +Weather Director Agent_state.json +Unit Testing Agent_state.json +Devin_state.json +swarms/__pycache__ +artifacts +transcript_generator.json +venv +.DS_Store +Cargo.lock +.DS_STORE +Cargo.lock +swarms/agents/.DS_Store +artifacts_two +logs +_build +conversation.txt +t1_state.json +stderr_log.txt +t2_state.json +.vscode +.DS_STORE +# Byte-compiled / optimized / DLL files +Transcript Generator_state.json +__pycache__/ +*.py[cod] +*$py.class +.grit +swarm-worker-01_state.json +error.txt +Devin Worker 2_state.json +# C extensions +*.so +.ruff_cache + + +errors.txt + +Autonomous-Agent-XYZ1B_state.json +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py +.DS_Store +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ +.vscode/settings.json diff --git a/playground/weatherman_agent/README.md b/playground/weatherman_agent/README.md new file mode 100644 index 00000000..4a932029 --- /dev/null +++ b/playground/weatherman_agent/README.md @@ -0,0 +1,112 @@ +# Baron Weather + +## Overview +Baron Weather is a sophisticated toolset designed to enable real-time querying of weather data using the Baron API. It utilizes a swarm of autonomous agents to handle concurrent data requests, optimizing for efficiency and accuracy in weather data retrieval and analysis. + +## Features +Baron Weather includes the following key features: +- **Real-time Weather Data Access**: Instantly fetch and analyze weather conditions using the Baron API. +- **Autonomous Agents**: A swarm system for handling multiple concurrent API queries efficiently. 
+- **Data Visualization**: Tools for visualizing complex meteorological data for easier interpretation. + + +## Prerequisites +Before you begin, ensure you have met the following requirements: +- Python 3.10 or newer +- git installed on your machine +- Install packages like swarms + +## Installation + +There are 2 methods, git cloning which allows you to modify the codebase or pip install for simple usage: + +### Pip +`pip3 install -U weather-swarm` + +### Cloning the Repository +To get started with Baron Weather, clone the repository to your local machine using: + +```bash +git clone https://github.com/baronservices/weatherman_agent.git +cd weatherman_agent +``` + +### Setting Up the Environment +Create a Python virtual environment to manage dependencies: + +```bash +python -m venv venv +source venv/bin/activate # On Windows use `venv\Scripts\activate` +``` + +### Installing Dependencies +Install the necessary Python packages via pip: + +```bash +pip install -r requirements.txt +``` + +## Usage +To start querying the Baron Weather API using the autonomous agents, run: + +```bash +python main.py +``` + +## API + +```bash +python3 api.py +``` + + +### Llama3 + +```python +from swarms import llama3Hosted + + +# Example usage +llama3 = llama3Hosted( + model="meta-llama/Meta-Llama-3-8B-Instruct", + temperature=0.8, + max_tokens=1000, + system_prompt="You are a helpful assistant.", +) + +completion_generator = llama3.run( + "create an essay on how to bake chicken" +) + +print(completion_generator) + +``` + +# Documentation +- [Llama3Hosted](docs/llama3_hosted.md) + +## Contributing +Contributions to Baron Weather are welcome and appreciated. Here's how you can contribute: + +1. Fork the Project +2. Create your Feature Branch (`git checkout -b feature/YourAmazingFeature`) +3. Commit your Changes (`git commit -m 'Add some YourAmazingFeature'`) +4. Push to the Branch (`git push origin feature/YourAmazingFeature`) +5. 
Open a Pull Request + + +## Tests +To run tests run the following: + +`pytest` + +## Contact +Project Maintainer - [Kye Gomez](mailto:kye@swarms.world) - [GitHub Profile](https://github.com/baronservices) + + +# Todo +- [ ] Add the schemas to the worker agents to output json +- [ ] Implement the parser and the function calling mapping to execute the functions +- [ ] Implement the HiearArchical Swarm and plug in and all the agents +- [ ] Then, implement the API server wrapping the hiearchical swarm +- [ ] Then, Deploy on the server 24/7 \ No newline at end of file diff --git a/playground/weatherman_agent/api.py b/playground/weatherman_agent/api.py new file mode 100644 index 00000000..f872afd4 --- /dev/null +++ b/playground/weatherman_agent/api.py @@ -0,0 +1,119 @@ +import os +import uuid +from typing import Any, Dict, List + +from dotenv import load_dotenv +from fastapi import FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel +from swarms import Agent, OpenAIChat +from swarms.utils.loguru_logger import logger + +from weather_swarm.prompts import ( + FEW_SHORT_PROMPTS, + GLOSSARY_PROMPTS, + WEATHER_AGENT_SYSTEM_PROMPT, +) +from weather_swarm.tools.tools import ( + point_query, + request_ndfd_basic, + request_ndfd_hourly, +) + +load_dotenv() + +logger.info("Starting the API server..") +app = FastAPI(debug=True) + +# Load the middleware to handle CORS +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +class ChatRequest(BaseModel): + model: str + prompt: str + max_tokens: int = 100 + temperature: float = 1.0 + + +class ChatResponse(BaseModel): + id: str + object: str + created: int + model: str + choices: List[Dict[str, Any]] + usage: Dict[str, Any] + + +@app.get("/v1/health") +async def health_check(): + return {"status": "ok"} + + +@app.get("/v1/models") +async def get_models(): + return {"models": ["WeatherMan Agent"]} + + +@app.post("/v1/chat/completions", response_model=ChatResponse) +async def chat_completions(request: ChatRequest): + if request.model != "WeatherMan Agent": + raise HTTPException(status_code=400, detail="Model not found") + + # Initialize the WeatherMan Agent + agent = Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + llm=OpenAIChat( + openai_api_key=os.getenv("OPENAI_API_KEY"), + max_tokens=request.max_tokens, + temperature=request.temperature, + ), + max_loops=1, + # dynamic_temperature_enabled=True, + # verbose=True, + output_type=str, + metadata_output_type="json", + function_calling_format_type="OpenAI", + function_calling_type="json", + tools=[point_query, request_ndfd_basic, request_ndfd_hourly], + ) + + # Response from the agent + + try: + response = agent.run(request.prompt) + return { + "id": uuid.uuid4(), + "object": "text_completion", + "created": int(os.times().system), + "model": agent.agent_name, + "choices": [{"text": response}], + "usage": { + "prompt_tokens": len(request.prompt.split()), + "completion_tokens": len(response.split()), + "total_tokens": len(request.prompt.split()) + + len(response.split()), + }, + } + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +# Example of how to run the FastAPI app +def deploy_app(host: str = "0.0.0.0", port: int = 8000): + import uvicorn + + uvicorn.run(app, host=host, port=port) + + +# Run the FastAPI app +if __name__ == "__main__": + deploy_app() diff 
--git a/playground/weatherman_agent/datasets/examples.csv b/playground/weatherman_agent/datasets/examples.csv new file mode 100644 index 00000000..d694d7ff --- /dev/null +++ b/playground/weatherman_agent/datasets/examples.csv @@ -0,0 +1,40 @@ +prompt,goal,required inputs,api example +What is the current temperature?,allow the user to request the current temperature for their location,user's location,"request_metar_nearest(""38"", ""-96"")" +Describe the current weather.,have the LLM construct a narrative weather description based on current conditions,user's location,"request_metar_nearest(""38"", ""-96"")" +How much rain fell at my location?,allow the user to determine how much rain has accumulated at their location in the last 24 hours,user's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" +Is it going to be sunny tomorrow?,allow the user to determine cloud coverage for their location ,user's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Is rain expected at my location in the next 6 hours? ,allow the user to determine if precip will fall in the coming hours,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +What is the max forecasted temperature today? ,allow the user to determine how hot or cold the air temp will be,user's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Will it be windy today? ,allow the user to determine the max wind speed for that day,user's location,"point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)" +,,, +How much rain fell at my location on date/time?,,"user's location, date/time", +What dates did hail fall at my location during x time range? ,allow the user to request a list of dates at which hail fell at their location,"user's location, date range", +Is it good weather to spray fertilizer? ,,, +How will the weather today impact solar panel performance? ,,, +Will my soccer game get rained out this evening? ,"determine if rain will impact my location ""this evening""","user's location, current date","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is it safe to go hiking today based on the weather? ,"check for high wind or rain forecast, perhaps extreme heat and cold","hiking location, current date", +What is the liklihood of frost tonight? ,are forecast conditions right for frost,"location, date", +What time will be the hottest part of the day tomorrow? ,determine highest forecast heat index tomorrow,"location, tomorrow's date", +When is it forecasted to rain again at my house? ,"use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. If not, swap to GFS for days 4-14",location, +How will the weather impact my flight today? ,check against conditions commonly associated with flight delays,location/time of departure at airport, +Are there any flood warnings in my area? ,check against current watch/warning map,location, +How will the weather affect road conditions and traffic safety tomorrow morning?,"check forecasted road conditions, perhaps check for heavy precip rate, high accums, snow depth",location/route, +When was the last time it rained at my location? 
,"use historical rainfall, weather inspector?","location, date range", +,,, +,,, +What's the highest temperature in United States right now?,determine the highest current temperature in the US,search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53 + +discard all METARs that do not begin with the letter K +" +What's the lowest temperature in United States right now?,determine the lowest current temperature in the US,search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53 + +discard all METARs that do not begin with the letter K +" +What's the highest temperature in the world right now?,determine the highest current temperature in the world,search all METARs, +What's the lowest temperature in the world right now?,determine the lowest current temperature in the world,search all METARs,https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI= +,,, +,,, +,,, +,,, +,,, +Weather inspector tie in???,,, \ No newline at end of file diff --git a/playground/weatherman_agent/datasets/rain_weather_prompts.csv b/playground/weatherman_agent/datasets/rain_weather_prompts.csv new file mode 100644 index 00000000..6f9a6276 --- /dev/null +++ b/playground/weatherman_agent/datasets/rain_weather_prompts.csv @@ -0,0 +1,26 @@ +prompt,goal,required inputs,api example +How much rain fell at my location?,allow the user to determine how much rain has accumulated at their location in the last 24 hours,user's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" +Is rain expected at my location in the next 6 hours?,"allow the user to determine if precip will fall in the coming hours, forecast query",user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +How much rain fell at my location on date/time?,historical query,"user's location, date/time",https://api.velocityweather.com/v1/cLRlLroVhajP/point/north-american-radar/Mask1-Mercator/2024-05-08T21%3A14%3A43Z.json?lat=35.505400093441324&lon=-87.60498046875&ts=1717294800&sig=_mCs5_XfZKQon55AzSGPI7dtoHY= +Will my soccer game get rained out this evening?,"determine if rain will impact my location ""this evening"", forecast query","user's location, current date","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +When is it forecasted to rain again at my house?,"use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. 
If not, swap to GFS for days 4-14",user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +When was the last time it rained at my location?,"use historical rainfall, weather inspector?","location, date range", +Is there any chance of rain during my commute today?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Should I bring an umbrella for my walk this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain for the outdoor concert tonight?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain during my barbecue this weekend?,forecast query,location of bbq,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is there a storm expected in my area today?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain on my drive to work tomorrow morning?,forecast query,user's location + work location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Are there any rain showers predicted for this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Should I expect rain during my jog this evening?,forecast query,user's location.....where will they jog? will the LLM prompt?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +How likely is it to rain during my picnic at the park?,forecast query,user's location .... will the LLM prompt for the picnic location?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is rain expected when I plan to leave for the airport?,forecast query,user's location....will the LLM prompt for the location they'll depart from?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will the weather be dry for my cycling trip today?,forecast query,"location of cycling trip, starting point","point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is rain in the forecast for my beach outing tomorrow?,forecast query,location of beach,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will it rain during my son's baseball game tonight?,forecast query,location of baseball game,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Should I prepare for rain on my camping trip this weekend?,forecast query,location of campsite....will the LLM prompt for the campsite location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +What’s the rain forecast for my neighborhood today?,forecast query,neighbourhood location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is there any rainfall expected while I'm gardening this afternoon?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +How heavy is the rain expected to be tonight?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Are there any rainstorms predicted during my road trip?,forecast query,can the LLM prompt for location/route of the road trip? 
should we state we don't support multi-location prompts? can we pull this off?,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Will there be rain showers in my area over the next few days?,forecast query,user's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" diff --git a/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv b/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv new file mode 100644 index 00000000..4dfef6c7 --- /dev/null +++ b/playground/weatherman_agent/datasets/weatherman_agent_LLM_prompts.csv @@ -0,0 +1,26 @@ +prompt,goal,required inputs,api example +What is the current temperature?,Allow the user to request the current temperature for their location,User's location,"request_metar_nearest(""38"", ""-96"")" +Describe the current weather.,Have the LLM construct a narrative weather description based on current conditions,User's location,"request_metar_nearest(""38"", ""-96"")" +How much rain fell at my location?,Allow the user to determine how much rain has accumulated at their location in the last 24 hours,User's location,"point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)" +Is it going to be sunny tomorrow?,Allow the user to determine cloud coverage for their location,User's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Is rain expected at my location in the next 6 hours?,Allow the user to determine if precipitation will fall in the coming hours,User's location,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +What is the max forecasted temperature today?,Allow the user to determine how hot or cold the air temp will be,User's location,"request_ndfd_basic(34.730301, -86.586098, forecast_time)" +Will it be windy today?,Allow the user to determine the max wind speed for that day,User's location,"point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)" +How much rain fell at my location on date/time?,,,User's location, date/time +What dates did hail fall at my location during x time range?,Allow the user to request a list of dates at which hail fell at their location,User's location, date range +Is it good weather to spray fertilizer?,,,, +How will the weather today impact solar panel performance?,,,, +Will my soccer game get rained out this evening?,Determine if rain will impact my location "this evening",User's location, current date,"point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)" +Is it safe to go hiking today based on the weather?,Check for high wind or rain forecast, perhaps extreme heat and cold,Hiking location, current date +What is the likelihood of frost tonight?,Are forecast conditions right for frost,Location, date +What time will be the hottest part of the day tomorrow?,Determine highest forecast heat index tomorrow,Location, tomorrow's date +When is it forecasted to rain again at my house?,Use forecast precip rate or max reflectivity and/or accums to see if rain is forecasted in the next 3 days. 
If not, swap to GFS for days 4-14,Location, +How will the weather impact my flight today?,Check against conditions commonly associated with flight delays,Location/time of departure at airport, +Are there any flood warnings in my area?,Check against current watch/warning map,Location, +How will the weather affect road conditions and traffic safety tomorrow morning?,Check forecasted road conditions, perhaps check for heavy precip rate, high accums, snow depth,Location/route, +When was the last time it rained at my location?,Use historical rainfall, weather inspector?,Location, date range +What's the highest temperature in United States right now?,Determine the highest current temperature in the US,Search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53" +What's the lowest temperature in United States right now?,Determine the lowest current temperature in the US,Search all METARs in CONUS,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/region.json?page=1&ts=1716776160&sig=TV6DX0DD3GrrGlSQV9Ia16c7xzs=&n_lat=52&s_lat=20&w_lon=-131&e_lon=-53" +What's the highest temperature in the world right now?,Determine the highest current temperature in the world,Search all METARs,https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI= +What's the lowest temperature in the world right now?,Determine the lowest current temperature in the world,Search all METARs,"https://api.velocityweather.com/v1/V4BRIfHvCw7P/reports/metar/all.json?page=1&ts=1716776520&sig=LOC_xB0tt3qtoqmL8iy6wtguLXI=" +Weather inspector tie in???,,,, diff --git a/playground/weatherman_agent/docs/llama3_hosted.md b/playground/weatherman_agent/docs/llama3_hosted.md new file mode 100644 index 00000000..9fd770f4 --- /dev/null +++ b/playground/weatherman_agent/docs/llama3_hosted.md @@ -0,0 +1,112 @@ +# llama3Hosted Documentation + +## Overview + +The `llama3Hosted` class is a high-level interface for interacting with a hosted version of the Llama3 model. This class is designed to simplify the process of generating responses from the Llama3 model by providing an easy-to-use interface for sending requests and receiving responses. The Llama3 model is a state-of-the-art language model developed by Meta, known for its ability to generate human-like text based on the input it receives. + +### Key Features + +- **Model Customization**: Allows the user to specify which version of the Llama3 model to use. +- **Temperature Control**: Adjusts the randomness of the generated responses. +- **Token Limitation**: Sets a limit on the maximum number of tokens in the generated response. +- **System Prompt**: Defines the initial context for the conversation, guiding the model's responses. + +## Purpose + +The `llama3Hosted` class is designed to provide developers with a straightforward way to utilize the capabilities of the Llama3 model without dealing with the complexities of model hosting and API management. It is particularly useful for applications that require natural language understanding and generation, such as chatbots, virtual assistants, and content generation tools. 
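
Under the hood, `run` issues a single OpenAI-style chat-completions request to the hosted endpoint. The sketch below illustrates roughly what that request looks like; the endpoint URL, payload fields, and stop token ids are taken from this repository's unit tests (`tests/test_llama3.py`) and may change between deployments, so treat it as illustrative rather than a stable contract.

```python
import requests

# Hosted endpoint as referenced in tests/test_llama3.py; subject to change.
url = "http://34.204.8.31:30001/v1/chat/completions"

payload = {
    "model": "meta-llama/Meta-Llama-3-8B-Instruct",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Tell me a joke."},
    ],
    "stop_token_ids": [128009, 128001],
    "temperature": 0.8,
    "max_tokens": 4000,
}

# POST the request and read the first choice, mirroring what llama3Hosted.run returns.
response = requests.post(
    url, headers={"Content-Type": "application/json"}, json=payload
)
print(response.json()["choices"][0]["message"]["content"])
```
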
+ +## Class Definition + +### llama3Hosted Parameters + +| Parameter | Type | Default | Description | +|----------------|--------|-----------------------------------------|--------------------------------------------------------------| +| `model` | `str` | `"meta-llama/Meta-Llama-3-8B-Instruct"` | The name or path of the Llama3 model to use. | +| `temperature` | `float`| `0.8` | The temperature parameter for generating responses. | +| `max_tokens` | `int` | `4000` | The maximum number of tokens in the generated response. | +| `system_prompt`| `str` | `"You are a helpful assistant."` | The system prompt to use for generating responses. | +| `*args` | | | Variable length argument list. | +| `**kwargs` | | | Arbitrary keyword arguments. | + +### Attributes + +| Attribute | Type | Description | +|----------------|--------|--------------------------------------------------------------| +| `model` | `str` | The name or path of the Llama3 model. | +| `temperature` | `float`| The temperature parameter for generating responses. | +| `max_tokens` | `int` | The maximum number of tokens in the generated response. | +| `system_prompt`| `str` | The system prompt for generating responses. | + +## Method: run + +### Parameters + +| Parameter | Type | Description | +|-----------|--------|-----------------------------------| +| `task` | `str` | The user's task or input. | +| `*args` | | Variable length argument list. | +| `**kwargs`| | Arbitrary keyword arguments. | + +### Returns + +| Type | Description | +|------|--------------------------------------------| +| `str`| The generated response from the Llama3 model.| + +### Usage Examples +First install weather_swarm with: + +`$ pip install -U weather-swarm` + + +#### Example 1: Basic Usage + +```python +from weather_swarmn import llama3Hosted + +llama = llama3Hosted() +response = llama.run("Tell me a joke.") +print(response) +``` + +#### Example 2: Custom Model and Parameters + +```python +import requests +import json +from weather_swarmn import llama3Hosted + + +llama = llama3Hosted( + model="custom-llama-model", + temperature=0.5, + max_tokens=2000, + system_prompt="You are a witty assistant." +) +response = llama.run("What's the weather like today?") +print(response) +``` + +#### Example 3: Using Additional Arguments + +```python +from weather_swarmn import llama3Hosted + +llama = llama3Hosted() +response = llama.run("Write a short story.", custom_stop_tokens=[128002, 128003]) +print(response) +``` + +## Additional Information and Tips + +- **Temperature Parameter**: The temperature parameter controls the randomness of the model's output. Lower values (close to 0) make the output more deterministic, while higher values (up to 1) make it more random. +- **System Prompt**: Crafting an effective system prompt can significantly impact the quality and relevance of the model's responses. Ensure the prompt aligns well with the intended use case. +- **Error Handling**: Always include error handling when making API requests to ensure your application can gracefully handle any issues that arise. + +## References and Resources + +- [Llama3 Model Documentation](https://github.com/facebookresearch/llama) +- [Requests Library Documentation](https://docs.python-requests.org/en/latest/) +- [JSON Library Documentation](https://docs.python.org/3/library/json.html) + +This documentation provides a comprehensive overview of the `llama3Hosted` class, its parameters, attributes, methods, and usage examples. 
By following this guide, developers can effectively integrate and utilize the Llama3 model in their applications. \ No newline at end of file diff --git a/playground/weatherman_agent/docs/weather_agent.md b/playground/weatherman_agent/docs/weather_agent.md new file mode 100644 index 00000000..58bf7ad0 --- /dev/null +++ b/playground/weatherman_agent/docs/weather_agent.md @@ -0,0 +1,113 @@ +## Weather Agent API Documentation + +### Overview +The Weather Agent API provides endpoints to interact with a weather prediction model, "WeatherMan Agent". This API allows users to get weather-related information through chat completions using the OpenAI GPT model with specific prompts and tools. + +### Base URL +``` +http://localhost:8000 +``` + +### Endpoints + +#### Health Check + +##### `GET /v1/health` +Checks the health status of the API. + +**Response:** +- `200 OK`: Returns a JSON object indicating the status of the API. + ```json + { + "status": "ok" + } + ``` + +#### Get Models + +##### `GET /v1/models` +Retrieves the list of available models. + +**Response:** +- `200 OK`: Returns a JSON object with the list of models. + ```json + { + "models": ["WeatherMan Agent"] + } + ``` + +#### Chat Completions + +##### `POST /v1/chat/completions` +Generates weather-related responses based on the provided prompt using the "WeatherMan Agent" model. + +**Request Body:** +- `model` (string): The name of the model to use. Must be "WeatherMan Agent". +- `prompt` (string): The input prompt for the chat completion. +- `max_tokens` (integer, optional): The maximum number of tokens to generate. Default is 100. +- `temperature` (float, optional): The sampling temperature for the model. Default is 1.0. + +**Example Request:** +```json +{ + "model": "WeatherMan Agent", + "prompt": "What will the weather be like tomorrow in New York?", + "max_tokens": 100, + "temperature": 1.0 +} +``` + +**Response:** +- `200 OK`: Returns a JSON object with the completion result. + ```json + { + "id": "unique-id", + "object": "text_completion", + "created": 1234567890, + "model": "WeatherMan Agent", + "choices": [ + { + "text": "The weather tomorrow in New York will be..." + } + ], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 15, + "total_tokens": 25 + } + } + ``` +- `400 Bad Request`: If the model specified is not "WeatherMan Agent". + ```json + { + "detail": "Model not found" + } + ``` +- `500 Internal Server Error`: If there is an error processing the request. + ```json + { + "detail": "Error message" + } + ``` + +### Models +The API supports the following model: +- **WeatherMan Agent**: A specialized agent for providing weather-related information based on the prompt. + +### Usage + +1. **Health Check:** Verify that the API is running by sending a GET request to `/v1/health`. +2. **Get Models:** Retrieve the list of available models by sending a GET request to `/v1/models`. +3. **Chat Completions:** Generate a weather-related response by sending a POST request to `/v1/chat/completions` with the required parameters. + +### Error Handling +The API returns appropriate HTTP status codes and error messages for different error scenarios: +- `400 Bad Request` for invalid requests. +- `500 Internal Server Error` for unexpected errors during processing. + +### CORS Configuration +The API allows cross-origin requests from any origin, supporting all methods and headers. + +--- + +For further assistance or issues, please contact the API support team. 
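
As a quick reference, a minimal Python client for the chat completions endpoint might look like the sketch below. It assumes the API is running locally on the default base URL described above; the request and response fields mirror the examples in this document.

```python
import requests

# Assumes the server is running on the default base URL (see "Base URL" above).
resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "WeatherMan Agent",
        "prompt": "What will the weather be like tomorrow in New York?",
        "max_tokens": 100,
        "temperature": 1.0,
    },
)
resp.raise_for_status()

data = resp.json()
# choices is a list of {"text": ...} objects, as shown in the response example.
print(data["choices"][0]["text"])
print(data["usage"])
```
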
\ No newline at end of file diff --git a/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py b/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py new file mode 100644 index 00000000..b2ede198 --- /dev/null +++ b/playground/weatherman_agent/examples/baron_tool_with_swarms_tool.py @@ -0,0 +1,30 @@ +from weather_swarm.tools.tools import request_metar_nearest +from swarms import tool + + +@tool( + name="RequestMetarNearest", + description=( + "Requests the nearest METAR (Meteorological Aerodrome Report)" + " data based on the given latitude and longitude." + ), + return_string=False, + return_dict=False, +) +def request_metar_nearest_new(lat: float, lon: float): + """ + Requests the nearest METAR (Meteorological Aerodrome Report) data based on the given latitude and longitude. + + Args: + lat (float): The latitude of the location. + lon (float): The longitude of the location. + + Returns: + The METAR data for the nearest location. + """ + return request_metar_nearest(lat, lon) + + +out = request_metar_nearest_new(37.7749, -122.4194) +print(out) +print(type(out)) diff --git a/playground/weatherman_agent/examples/llama_3_hosted_swarms.py b/playground/weatherman_agent/examples/llama_3_hosted_swarms.py new file mode 100644 index 00000000..78292685 --- /dev/null +++ b/playground/weatherman_agent/examples/llama_3_hosted_swarms.py @@ -0,0 +1,19 @@ +from swarms import llama3Hosted + + +# Example usage +llama3 = llama3Hosted( + model="meta-llama/Meta-Llama-3-8B-Instruct", + temperature=0.8, + max_tokens=1000, + system_prompt=( + "You're a weather agent for Baron Weather, you specialize in" + " weather analysis" + ), +) + +completion_generator = llama3.run( + "What are the best weather conditions to lay concrete", +) + +print(completion_generator) diff --git a/playground/weatherman_agent/examples/llama_agent.py b/playground/weatherman_agent/examples/llama_agent.py new file mode 100644 index 00000000..6debdd38 --- /dev/null +++ b/playground/weatherman_agent/examples/llama_agent.py @@ -0,0 +1,34 @@ +from swarms import Agent +from swarms import llama3Hosted +from weather_swarm.prompts import GLOSSARY_PROMPTS +from weather_swarm.prompts import ( + FEW_SHORT_PROMPTS, + WEATHER_AGENT_SYSTEM_PROMPT, +) + + +# Purpose = To generate weather information for the user and send API requests to the Baron Weather API +agent = Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + # sop=list_tool_schemas_json, + llm=llama3Hosted( + max_tokens=2000, + temperature=0.1, + ), + max_loops="auto", + autosave=True, + dashboard=False, + streaming_on=True, + interactive=True, +) + +# Run the agent to generate the person's information +generated_data = agent.run( + "Based on the current humidity in Huntsville, how frizzy will my" + " hair get?" 
+) + +# Print the generated data +# print(f"Generated data: {generated_data}") diff --git a/playground/weatherman_agent/examples/tool_schemas.py b/playground/weatherman_agent/examples/tool_schemas.py new file mode 100644 index 00000000..3cb561c2 --- /dev/null +++ b/playground/weatherman_agent/examples/tool_schemas.py @@ -0,0 +1,35 @@ +from swarms import get_openai_function_schema_from_func + +from weather_swarm.tools.tools import ( + request_metar_nearest, + point_query, + request_ndfd_basic, + # point_query_region, + request_ndfd_hourly, +) + + +def get_schemas_for_funcs(funcs): + schemas = [] + for func in funcs: + name = str(func.__name__) + description = str(func.__doc__) + schema = get_openai_function_schema_from_func( + func, name=name, description=description + ) + schemas.append(str(schema)) + merged_schemas = "\n".join(schemas) + return merged_schemas + + +funcs = [ + request_metar_nearest, + point_query, + request_ndfd_basic, + # point_query_region, + request_ndfd_hourly, +] + +schemas = get_schemas_for_funcs(funcs) +print(schemas) +print(type(schemas)) diff --git a/playground/weatherman_agent/pyproject.toml b/playground/weatherman_agent/pyproject.toml new file mode 100644 index 00000000..5d38a19a --- /dev/null +++ b/playground/weatherman_agent/pyproject.toml @@ -0,0 +1,55 @@ +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "weather-swarm" +version = "0.0.6" +description = "Weather Swarm - Pytorch" +license = "MIT" +authors = ["Kye Gomez "] +homepage = "https://github.com/baronservices/weatherman_agent" +documentation = "https://github.com/baronservices/weatherman_agent" # Add this if you have documentation. +readme = "README.md" # Assuming you have a README.md +repository = "https://github.com/baronservices/weatherman_agent" +keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3.9" +] + +[tool.poetry.dependencies] +python = "^3.10" +swarms = "*" +pydantic = "2.7.1" + + + +[tool.poetry.group.lint.dependencies] +ruff = "^0.1.6" +types-toml = "^0.10.8.1" +types-redis = "^4.3.21.6" +types-pytz = "^2023.3.0.0" +black = "^23.1.0" +types-chardet = "^5.0.4.6" +mypy-protobuf = "^3.0.0" + + +[tool.autopep8] +max_line_length = 80 +ignore = "E501,W6" # or ["E501", "W6"] +in-place = true +recursive = true +aggressive = 3 + + +[tool.ruff] +line-length = 70 + +[tool.black] +line-length = 70 +target-version = ['py38'] +preview = true diff --git a/playground/weatherman_agent/requirements.txt b/playground/weatherman_agent/requirements.txt new file mode 100644 index 00000000..a26b8b84 --- /dev/null +++ b/playground/weatherman_agent/requirements.txt @@ -0,0 +1,18 @@ +swarms +pydantic==2.7.1 +base64==1.0.0 +datetime==4.3 +hashlib==20081119 +hmac==20151222 +shutil==1.7.0 +urllib3==1.26.7 +json5==0.9.6 +codecs==1.0.0 +fastapi +pytest +hydra +loguru +requests +opencv-python +beartype +termcolor diff --git a/playground/weatherman_agent/scripts/Dockerfile b/playground/weatherman_agent/scripts/Dockerfile new file mode 100644 index 00000000..7213ac11 --- /dev/null +++ b/playground/weatherman_agent/scripts/Dockerfile @@ -0,0 +1,28 @@ +# Use an official Python runtime as a parent image +FROM python:3.10-slim-buster + +# Set environment varibles +ENV 
PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 + +# Set work directory +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + default-libmysqlclient-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +COPY requirements.txt /app/ +RUN pip install --no-cache-dir -r requirements.txt + +# Copy project +COPY . /app/ + +# Expose port +EXPOSE 5000 + +# Run the application: +CMD ["gunicorn", "-w", "4", "-k", "gevent", "api:app"] \ No newline at end of file diff --git a/playground/weatherman_agent/scripts/setup.sh b/playground/weatherman_agent/scripts/setup.sh new file mode 100644 index 00000000..e69de29b diff --git a/playground/weatherman_agent/tests/test_baron_tools.py b/playground/weatherman_agent/tests/test_baron_tools.py new file mode 100644 index 00000000..4b21856a --- /dev/null +++ b/playground/weatherman_agent/tests/test_baron_tools.py @@ -0,0 +1,56 @@ +from unittest.mock import patch +from weather_swarm.tools.tools import ( + request_metar_nearest, + point_query, + request_ndfd_basic, +) + + +class TestWeatherFunctions: + @patch("your_module.request_metar_nearest") + def test_request_metar_nearest(self, mock_request_metar_nearest): + mock_request_metar_nearest.return_value = "expected_value" + result = request_metar_nearest("38", "-96") + assert result == "expected_value" + + @patch("your_module.point_query") + def test_point_query_precip_totalaccum(self, mock_point_query): + mock_point_query.return_value = "expected_value" + result = point_query( + "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4 + ) + assert result == "expected_value" + + @patch("your_module.point_query") + def test_point_query_baron_hires_maxreflectivity( + self, mock_point_query + ): + mock_point_query.return_value = "expected_value" + result = point_query( + "baron-hires-maxreflectivity-dbz-all", + "Mask1-Mercator", + -86.6, + 34.4, + ) + assert result == "expected_value" + + @patch("your_module.point_query") + def test_point_query_baron_hires_windspeed( + self, mock_point_query + ): + mock_point_query.return_value = "expected_value" + result = point_query( + "baron-hires-windspeed-mph-10meter", + "Standard-Mercator", + -86.6, + 34.4, + ) + assert result == "expected_value" + + @patch("your_module.request_ndfd_basic") + def test_request_ndfd_basic(self, mock_request_ndfd_basic): + mock_request_ndfd_basic.return_value = "expected_value" + result = request_ndfd_basic( + 34.730301, -86.586098, "forecast_time" + ) + assert result == "expected_value" diff --git a/playground/weatherman_agent/tests/test_llama3.py b/playground/weatherman_agent/tests/test_llama3.py new file mode 100644 index 00000000..2e98c03d --- /dev/null +++ b/playground/weatherman_agent/tests/test_llama3.py @@ -0,0 +1,41 @@ +from unittest.mock import Mock, patch +from swarms import llama3Hosted + + +class TestLlama3Hosted: + def setup_method(self): + self.llama = llama3Hosted() + + def test_init(self): + assert ( + self.llama.model == "meta-llama/Meta-Llama-3-8B-Instruct" + ) + assert self.llama.temperature == 0.8 + assert self.llama.max_tokens == 4000 + assert ( + self.llama.system_prompt == "You are a helpful assistant." 
+ ) + + @patch("requests.request") + def test_run(self, mock_request): + mock_response = Mock() + expected_result = "Test response" + mock_response.json.return_value = { + "choices": [{"message": {"content": expected_result}}] + } + mock_request.return_value = mock_response + + result = self.llama.run("Test task") + assert result == expected_result + mock_request.assert_called_once_with( + "POST", + "http://34.204.8.31:30001/v1/chat/completions", + headers={"Content-Type": "application/json"}, + data=( + '{"model": "meta-llama/Meta-Llama-3-8B-Instruct",' + ' "messages": [{"role": "system", "content": "You are' + ' a helpful assistant."}, {"role": "user", "content":' + ' "Test task"}], "stop_token_ids": [128009, 128001],' + ' "temperature": 0.8, "max_tokens": 4000}' + ), + ) diff --git a/playground/weatherman_agent/tests/tests_weather_agent.py b/playground/weatherman_agent/tests/tests_weather_agent.py new file mode 100644 index 00000000..891da6a6 --- /dev/null +++ b/playground/weatherman_agent/tests/tests_weather_agent.py @@ -0,0 +1,161 @@ +import os +import pytest +from dotenv import load_dotenv +from weather_swarm import Agent +from weather_swarm.prompts import ( + WEATHER_AGENT_SYSTEM_PROMPT, + GLOSSARY_PROMPTS, + FEW_SHORT_PROMPTS, +) +from weather_swarm.tools.tools import ( + point_query, + request_ndfd_basic, + request_ndfd_hourly, +) +from swarms import OpenAIChat +from unittest.mock import Mock, patch + +# Load environment variables for tests +load_dotenv() + + +# Fixtures +@pytest.fixture +def weather_agent(): + return Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + llm=OpenAIChat(), + max_loops=1, + dynamic_temperature_enabled=True, + verbose=True, + output_type=str, + tools=[point_query, request_ndfd_basic, request_ndfd_hourly], + docs_folder="datasets", + metadata="json", + function_calling_format_type="OpenAI", + function_calling_type="json", + ) + + +# Test Environment Loading +def test_load_dotenv(): + assert ( + "API_KEY" in os.environ + ), "API_KEY not found in environment variables" + assert ( + "API_SECRET" in os.environ + ), "API_SECRET not found in environment variables" + + +# Test Agent Initialization +def test_agent_initialization(weather_agent): + assert weather_agent.agent_name == "WeatherMan Agent" + assert weather_agent.system_prompt == WEATHER_AGENT_SYSTEM_PROMPT + assert weather_agent.llm is not None + assert len(weather_agent.tools) == 3 + assert weather_agent.max_loops == 1 + assert weather_agent.dynamic_temperature_enabled is True + assert weather_agent.verbose is True + assert weather_agent.output_type == str + assert weather_agent.docs_folder == "datasets" + assert weather_agent.metadata == "json" + assert weather_agent.function_calling_format_type == "OpenAI" + assert weather_agent.function_calling_type == "json" + + +# Parameterized Testing for Agent Tools +@pytest.mark.parametrize( + "tool", [point_query, request_ndfd_basic, request_ndfd_hourly] +) +def test_agent_tools(weather_agent, tool): + assert tool in weather_agent.tools + + +# Mocking the Agent Run Method +@patch.object( + Agent, + "run", + return_value="No, there are no chances of rain today in Huntsville.", +) +def test_agent_run(mock_run, weather_agent): + response = weather_agent.run( + "Are there any chances of rain today in Huntsville?" + ) + assert ( + response + == "No, there are no chances of rain today in Huntsville." 
+ ) + mock_run.assert_called_once_with( + "Are there any chances of rain today in Huntsville?" + ) + + +# Testing Agent's Response Handling +def test_agent_response_handling(weather_agent): + weather_agent.llm = Mock() + weather_agent.llm.return_value = "Mocked Response" + response = weather_agent.run("What's the weather like?") + assert response == "Mocked Response" + + +# Test for Exception Handling in Agent Run +def test_agent_run_exception_handling(weather_agent): + weather_agent.llm = Mock( + side_effect=Exception("Mocked Exception") + ) + with pytest.raises(Exception, match="Mocked Exception"): + weather_agent.run("Will it rain tomorrow?") + + +# Testing Agent Initialization with Missing Parameters +def test_agent_initialization_missing_params(): + with pytest.raises(TypeError): + Agent(agent_name="WeatherMan Agent") + + +# Mocking Environment Variables +@patch.dict( + os.environ, + {"API_KEY": "mock_api_key", "API_SECRET": "mock_api_secret"}, +) +def test_environment_variables(): + load_dotenv() + assert os.getenv("API_KEY") == "mock_api_key" + assert os.getenv("API_SECRET") == "mock_api_secret" + + +# Testing Tools Functionality (Example: point_query) +def test_point_query(): + response = point_query("test_latitude", "test_longitude") + assert ( + response is not None + ) # Replace with more specific assertions based on actual function behavior + + +# Testing Tools Functionality (Example: request_ndfd_basic) +def test_request_ndfd_basic(): + response = request_ndfd_basic("test_latitude", "test_longitude") + assert ( + response is not None + ) # Replace with more specific assertions based on actual function behavior + + +# Testing Tools Functionality (Example: request_ndfd_hourly) +def test_request_ndfd_hourly(): + response = request_ndfd_hourly("test_latitude", "test_longitude") + assert ( + response is not None + ) # Replace with more specific assertions based on actual function behavior + + +# Grouping and Marking Tests +@pytest.mark.slow +def test_slow_functionality(weather_agent): + response = weather_agent.run("Long running query") + assert response is not None # Example placeholder + + +# Test Coverage Report +# Run the following command to generate a coverage report: `pytest --cov=weather_swarm` diff --git a/playground/weatherman_agent/todo/director_agent.py b/playground/weatherman_agent/todo/director_agent.py new file mode 100644 index 00000000..faa5f1fa --- /dev/null +++ b/playground/weatherman_agent/todo/director_agent.py @@ -0,0 +1,279 @@ +from swarms import Agent +from swarms import llama3Hosted +from weather_swarm.prompts import GLOSSARY_PROMPTS +from pydantic import BaseModel, Field + + +# Define the schema for the HierarchicalSwarmRequest +# class HierarchicalSwarmRequest(BaseModel): +# agents: Dict[str, Any] = Field( +# ..., +# description=( +# "The name of the agents and their respective tasks to be" +# " executed hierarchically." +# ), +# examples={ +# "Weather Director Agent": { +# "task": ( +# "Are there any chances of rain today in" +# " Huntsville?" +# ) +# } +# }, +# ) + + +class HierarchicalSwarmRequest(BaseModel): + task: str = Field( + ..., + description="The user's query.", + examples={ + "What is the current temperature at my location?": { + "task": "What is the current temperature at my location?" 
+ } + }, + ) + agent_name: str = Field( + ..., + description="The name of the specialized agent.", + examples={ + "Current Temperature Retrieval Agent": "Current Temperature Retrieval Agent" + }, + ) + + +# Define the schema for the HierarchicalSwarmResponse +def DIRECTOR_SYSTEM_PROMPT() -> str: + return """**Prompt:** + As a director master agent, your task is to communicate with the user, understand their weather-related queries, and delegate the appropriate tasks to specialized worker agents. Each worker agent is specialized in retrieving a specific type of weather data. Your role involves selecting the correct agent or a list of agents, giving them the necessary tasks, and compiling their responses to provide a comprehensive answer to the user. + + **Goal:** + Efficiently manage and delegate tasks to specialized worker agents to gather the necessary weather data and provide a detailed, accurate response to the user. + + **Process:** + 1. **Receive User Query:** + - Understand the user's question or request regarding weather data. + + 2. **Identify Required Data:** + - Determine the type(s) of weather data needed to answer the user's query. + + 3. **Select Appropriate Agents:** + - Choose the specialized agent(s) capable of retrieving the required data. + + 4. **Delegate Tasks:** + - Assign the relevant task to the selected agent(s) using the appropriate inputs. + + 5. **Compile Responses:** + - Gather and compile the data returned by the worker agents into a cohesive response. + + 6. **Respond to User:** + - Provide a detailed and accurate answer to the user based on the compiled data. + + **Worker Agents and Their Specializations:** + 1. **Current Temperature Retrieval Agent** + - Task: Provide the current temperature based on the user's location. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_metar_nearest("38", "-96")` + + 2. **Current Weather Description Agent** + - Task: Construct a narrative weather description based on current conditions. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_metar_nearest("38", "-96")` + + 3. **Rainfall Accumulation Agent** + - Task: Provide the accumulated rainfall at the user's location for the last 24 hours. + - Required Inputs: User's location (latitude and longitude). + - API Example: `point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)` + + 4. **Cloud Coverage Forecast Agent** + - Task: Provide the cloud coverage forecast for the user's location for the next day. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)` + + 5. **Precipitation Forecast Agent** + - Task: Provide the precipitation forecast for the user's location for the next 6 hours. + - Required Inputs: User's location (latitude and longitude). + - API Example: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)` + + 6. **Maximum Temperature Forecast Agent** + - Task: Provide the maximum forecasted temperature for the user's location for today. + - Required Inputs: User's location (latitude and longitude). + - API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)` + + 7. **Wind Speed Forecast Agent** + - Task: Provide the maximum wind speed forecast for the user's location for today. + - Required Inputs: User's location (latitude and longitude). 
+ - API Example: `point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)` + + **Example Workflow:** + 1. **User Query:** + - "What is the current temperature and will it rain in the next 6 hours at my location?" + + 2. **Identify Required Data:** + - Current temperature and precipitation forecast. + + 3. **Select Appropriate Agents:** + - Current Temperature Retrieval Agent + - Precipitation Forecast Agent + + 4. **Delegate Tasks:** + - Current Temperature Retrieval Agent: `request_metar_nearest("38", "-96")` + - Precipitation Forecast Agent: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)` + + 5. **Compile Responses:** + - Gather responses from both agents. + + 6. **Respond to User:** + - "The current temperature at your location is X degrees. There is/is not expected to be precipitation in the next 6 hours." + + By following this structured approach, you can efficiently manage user queries and provide accurate, detailed weather information. + """ + + +# Define the schema for the HierarchicalSwarmResponse +def DIRECTOR_SCHEMA() -> str: + return """ + + { + "type": "object", + "properties": { + "task_id": { + "type": "string", + "description": "Unique identifier for the task" + }, + "user_query": { + "type": "string", + "description": "The query provided by the user" + }, + "agents": { + "type": "array", + "description": "List of agents to handle the query", + "items": { + "type": "object", + "properties": { + "agent_name": { + "type": "string", + "description": "Name of the specialized agent" + }, + "task": { + "type": "string", + "description": "Task description for the agent" + }, + }, + "required": ["agent_name", "task"] + } + } + }, + "required": ["task_id", "user_query", "agents"] + } + + """ + + +def DIRECTOR_AGENT_CALLING_FEW_SHOT() -> str: + return """ + + { + "task_id": "1", + "user_query": "What is the current temperature at my location?", + "agents": [ + { + "agent_name": "Current Temperature Retrieval Agent", + "task": "Provide the current temperature based on the user's location.", + } + ] + } + + + ########## "What is the current temperature and will it rain in the next 6 hours at my location? 
######### + + { + "task_id": "2", + "user_query": "What is the current temperature and will it rain in the next 6 hours at my location?", + "agents": [ + { + "agent_name": "Current Temperature Retrieval Agent", + "task": "Provide the current temperature based on the user's location.", + }, + { + "agent_name": "Precipitation Forecast Agent", + "task": "Provide the precipitation forecast for the user's location for the next 6 hours.", + } + ] + } + + ########### END OF EXAMPLES ########### + + ############# Example 3: Maximum Temperature and Wind Speed Forecast ######### + { + "task_id": "3", + "user_query": "What is the maximum temperature and wind speed forecast for today at my location?", + "agents": [ + { + "agent_name": "Maximum Temperature Forecast Agent", + "task": "Provide the maximum forecasted temperature for the user's location for today.", + }, + { + "agent_name": "Wind Speed Forecast Agent", + "task": "Provide the maximum wind speed forecast for the user's location for today.", + } + ] + } + + + ############ End of Example 3 ############ + + ############ Example 4: Rainfall Accumulation and Cloud Coverage Forecast ######### + { + "task_id": "4", + "user_query": "How much rain fell at my location in the last 24 hours and what is the cloud coverage forecast for tomorrow?", + "agents": [ + { + "agent_name": "Rainfall Accumulation Agent", + "task": "Provide the accumulated rainfall at the user's location for the last 24 hours.", + }, + { + "agent_name": "Cloud Coverage Forecast Agent", + "task": "Provide the cloud coverage forecast for the user's location for the next day.", + } + ] + } + + ############ End of Example 4 ############ + + """ + + +# [C]reate a new agent +agent = Agent( + agent_name="Weather Director Agent", + system_prompt=DIRECTOR_SYSTEM_PROMPT(), + sop_list=[ + GLOSSARY_PROMPTS, + DIRECTOR_SCHEMA(), + DIRECTOR_AGENT_CALLING_FEW_SHOT(), + ], + # sop=list_tool_schemas_json, + llm=llama3Hosted(max_tokens=1000), + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + # interactive=True, + verbose=True, + # Set the output type to the tool schema which is a BaseModel + output_type=str, # or dict, or str + metadata_output_type="json", + # List of schemas that the agent can handle + function_calling_format_type="OpenAI", + function_calling_type="json", # or soon yaml + # return_history=True, +) + +# Run the agent to generate the person's information +generated_data = agent.run( + "Are there any chances of rain today in Huntsville?" +) + +# Print the generated data +print(f"Generated data: {generated_data}") diff --git a/playground/weatherman_agent/todo/worker_agents.py b/playground/weatherman_agent/todo/worker_agents.py new file mode 100644 index 00000000..ed8d090f --- /dev/null +++ b/playground/weatherman_agent/todo/worker_agents.py @@ -0,0 +1,269 @@ +from swarms import Agent +from swarms import llama3Hosted +from pydantic import BaseModel, Field +from weather_swarm.tools.tools import ( + request_metar_nearest, + point_query, + request_ndfd_basic, + point_query_region, + request_ndfd_hourly, +) + + +class WeatherRequest(BaseModel): + """ + A class to represent the weather request. + + Attributes + ---------- + query : str + The user's query. 
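+
+ Example (illustrative sketch; the fields actually defined below are
+ "task" and "tool", and the tool name shown is just one of the Baron
+ helpers imported above):
+
+ req = WeatherRequest(
+ task="Are there any chances of rain today in Huntsville?",
+ tool="request_metar_nearest",
+ )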
+ """ + + task: str = Field(..., title="The user's query") + tool: str = Field(None, title="The tool to execute") + + +def current_temperature_retrieval_agent(): + return """ + ### Current Temperature Retrieval Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the current temperature based on the user's location. Ensure accuracy and up-to-date information. + + **Goal:** + Allow the user to request the current temperature for their location. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_metar_nearest("38", "-96") + """ + + +def current_weather_description_agent(): + return """ + ### Current Weather Description Agent + + **Prompt:** + As a specialized weather data agent, your task is to construct a narrative weather description based on the current conditions at the user's location. + + **Goal:** + Have the LLM construct a narrative weather description based on current conditions. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_metar_nearest("38", "-96") + """ + + +def rainfall_accumulation_agent(): + return """ + ### Rainfall Accumulation Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the accumulated rainfall at the user's location for the last 24 hours. + + **Goal:** + Allow the user to determine how much rain has accumulated at their location in the last 24 hours. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4) + """ + + +def cloud_coverage_forecast_agent(): + return """ + ### Cloud Coverage Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the cloud coverage forecast for the user's location for the next day. + + **Goal:** + Allow the user to determine cloud coverage for their location. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_ndfd_basic(34.730301, -86.586098, forecast_time) + """ + + +def precipitation_forecast_agent(): + return """ + ### Precipitation Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the precipitation forecast for the user's location for the next 6 hours. + + **Goal:** + Allow the user to determine if precipitation will fall in the coming hours. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4) + """ + + +def maximum_temperature_forecast_agent(): + return """ + ### Maximum Temperature Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the maximum forecasted temperature for the user's location for today. + + **Goal:** + Allow the user to determine how hot or cold the air temperature will be. + + **Required Inputs:** + User's location (latitude and longitude). + + **API Example:** + request_ndfd_basic(34.730301, -86.586098, forecast_time) + """ + + +def wind_speed_forecast_agent(): + return """ + ### Wind Speed Forecast Agent + + **Prompt:** + As a specialized weather data agent, your task is to provide the maximum wind speed forecast for the user's location for today. + + **Goal:** + Allow the user to determine the maximum wind speed for that day. + + **Required Inputs:** + User's location (latitude and longitude). 
+ + **API Example:** + point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4) + """ + + +llm = llama3Hosted( + max_tokens=1000, + temperature=0.5, +) + + +# Define the agents with their specific prompts +temp_tracker = Agent( + agent_name="TempTracker", + system_prompt=current_temperature_retrieval_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[request_metar_nearest], +) + +weather_narrator = Agent( + agent_name="WeatherNarrator", + system_prompt=current_weather_description_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[request_metar_nearest], +) + +rain_gauge = Agent( + agent_name="RainGauge", + system_prompt=rainfall_accumulation_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[point_query], +) + +cloud_predictor = Agent( + agent_name="CloudPredictor", + system_prompt=cloud_coverage_forecast_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[request_ndfd_basic], +) + +rain_forecaster = Agent( + agent_name="RainForecaster", + system_prompt=precipitation_forecast_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[point_query_region], +) + +temp_forecaster = Agent( + agent_name="TempForecaster", + system_prompt=maximum_temperature_forecast_agent(), + llm=llm, + max_loops=1, + verbose=True, + output_type=dict, + autosave=True, + dashboard=False, + streaming_on=True, + stopping_token="", + tools=[request_ndfd_hourly], +) + +wind_watcher = Agent( + agent_name="WindWatcher", + system_prompt=wind_speed_forecast_agent(), + llm=llm, + max_loops=1, + autosave=True, + dashboard=False, + streaming_on=True, + verbose=True, + stopping_token="", + tools=[point_query_region], +) + +# Create a list +agents = [ + temp_tracker, + weather_narrator, + rain_gauge, + cloud_predictor, + rain_forecaster, + temp_forecaster, + wind_watcher, +] + +# # Create a hierarchical swarm +# swarm = HiearchicalSwarm( +# name = "WeatherSwarm", +# description="A swarm of weather agents", +# agents=agents, +# director = +# ) diff --git a/playground/weatherman_agent/weather_agent.py b/playground/weatherman_agent/weather_agent.py new file mode 100644 index 00000000..998b8922 --- /dev/null +++ b/playground/weatherman_agent/weather_agent.py @@ -0,0 +1,50 @@ +from dotenv import load_dotenv +from swarms import Agent, OpenAIChat + +from weather_swarm.prompts import ( + FEW_SHORT_PROMPTS, + GLOSSARY_PROMPTS, + WEATHER_AGENT_SYSTEM_PROMPT, +) +from weather_swarm.tools.tools import ( + point_query, + request_ndfd_basic, + request_ndfd_hourly, +) + +# Load the environment variables +load_dotenv() + + +# Purpose = To generate weather information for the user and send API requests to the Baron Weather API +agent = Agent( + agent_name="WeatherMan Agent", + system_prompt=WEATHER_AGENT_SYSTEM_PROMPT, + sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS], + # sop=list_tool_schemas_json, + llm=OpenAIChat(), + max_loops=1, + # interactive=True, + dynamic_temperature_enabled=True, + verbose=True, + # Set the output type to the tool schema which is a BaseMode + output_type=str, # or dict, or str + tools=[ + # request_metar_nearest, + point_query, + request_ndfd_basic, + # 
point_query_region, + request_ndfd_hourly, + ], + docs_folder="datasets", # Add every document in the datasets folder + metadata="json", + function_calling_format_type="OpenAI", + function_calling_type="json", +) + +# Run the agent to generate the person's information +# Run the agent to generate the person's information +output = agent.run("Are there any chances of rain today in Huntsville?") +# # Write the output to a new file +# with open('output.txt', 'w') as f: +# f.write(str(output)) \ No newline at end of file diff --git a/playground/weatherman_agent/weather_swarm/__init__.py b/playground/weatherman_agent/weather_swarm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/playground/weatherman_agent/weather_swarm/prompts.py b/playground/weatherman_agent/weather_swarm/prompts.py new file mode 100644 index 00000000..83c32499 --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/prompts.py @@ -0,0 +1,152 @@ +GLOSSARY_PROMPTS = """ + +Glossary + +API Terminology +Access_key +A private access key or shared access key (not a secret and not an Application Key) used to access the Baron Weather API. View your access keys on your account page. + +Application Key +Users’ personal and confidential key from which access keys are derived. The application key allows management of access keys. View your application key on your account page. + +Configuration_code +Configuration codes are most often used to differentiate between EPSG:3857 (Mercator) and EPSG:4326 (Geodetic) projections. In the Baron Weather API we add a descriptor to the beginning to indicate any additional parameters to the projection. The default descriptor is ‘Standard’ and will be the primary configuration used, but some data products may offer alternative descriptor to differentiate formatting options. + +Coordinated Universal Time (UTC) +This standard organizes the data so the largest temporal term (the year) appears first in the data string and progresses to the smallest term (the second), like so 2012-12-31TI8:51:23Z. + +Format +The language format for API responses. In the Baron Weather API, responses for text products can be in JSON or JSONP format, and graphical formats are always in png format. + +ISO8601 +The primary time standard by which the world regulates clocks and time. + +Max-age +It's an optional parameter for the metar, buoy, and cwop "nearest" api which allows developers to query a lat/lon and only get back data is more recent than the prescribed date and time. + +Metadata_timestamp +The ISO 8601 UTC date/time for the data found in the returned metadata "time" parameter(s). + +Metadata_valid_time +The ISO 8601 UTC date/time for the data found in the returned metadata "valid_times" list. This is required for forecast products (those that provide a valid_times list in the metadata), but unnecessary for non-forecast products. + +Pages +The page parameter was put in place to minimize the amount of information returned in the response. Text products that support the page parameter return the current page number and the total number of pages when you make a request. Many text products provide thousands of lines of data, which can be overwhelming when users are looking for a specific piece of information for a specific time frame. For example, a developers looking for the current weather conditions at all METAR stations will not need to have thousands of lines of text returned. 
Instead, we limit them to a maximum number of stations per page, then if users want the full set, they have to ask explicitly for page 2, page 3, etc. in the request URL. + +Product Code +The code to include in the API URL request that is specific to each weather product. + +Reference Time +The time the forecast model begins. In the product-instances metadata, this is called "time". + +Timestamp +The timestamp value included with the request and used to create the signature. Represented as ‘ts’ in request and always in UTC format. + +Timestep +In general, a single point in time for which the product is valid, also called "valid_times". However for accumulation products, the timesteps represent the end of a measured time interval for which total accumulated precipitation is forecast. A list of timesteps or "valid_times" are provided In the product-instances metadata. + +Timestep Interval +The interval between timesteps. + +Valid_times +The list of UTC-formatted timesteps for a forecast product when the Product Instances API is run. + +X +The x-coordinate of the requested tile. This value represents the horizontal index of the tile, assuming an origin of the lower left corner of the tile grid (0,0). These coordinates correspond to the Tile Map Service Specification. + +Y +The y-coordinate of the requested tile. This value represents the vertical index of the tile, assuming an origin of the lower left corner of the tile grid (0,0). These coordinates correspond to the Tile Map Service Specification. + +Z +The z-coordinate of the requested tile. This value represents the zoom level (depth) of the tile. A value of 0 shows the entire world using the minimum number amount of tiles (1 for Mercator, 2 for Geodetic). The maximum available depth may vary by product. These coordinates correspond to the Tile Map Service Specification. + + + + +Meteorological Terminology +dBZ +Stands for decibels relative to Z. It is a meteorological measure of equivalent reflectivity (Z) of a radar signal reflected off a remote object. + +Dew Point +The temperature below which the water vapor in a volume of humid air at a constant barometric pressure will condense into liquid water. + +Heat Index +An index that combines air temperature and relative humidity in an attempt to determine the human-perceived equivalent temperature — how hot it feels. + +Infrared (IR) +In relation to satellite imagery, infrared imagery is produced by satellite analysis of infrared wavelengths. This analysis indicates the temperature of air masses, making it possible to identify cloud cover day or night. + +kft +Stands for thousands of feet. + +Relative Humidity +The ratio of the partial pressure of water vapor in an air-water mixture to the saturated vapor pressure of water at a given temperature. + +Valid Time Event Code (VTEC) +Format in which alerting information is pulled from the National Weather Service. + +Visible Satellite (VIS) +Visible satellite imagery is a snapshot of cloud cover from space. Consequently it is only usable during daylights hours. It is the easiest weather data product for laypeople to understand. + +Warnings +The NWS issues a warning when a hazardous weather or hydrologic event is occurring, is imminent, or has a very high probability of occurring. Often warnings are not issued until conditions have been visually verified. A warning is used for conditions posing a threat to life or property. 
+ +Watches +The NWS issues a watch when the risk of a hazardous weather or hydrologic event has increased significantly, but its occurrence, location, and/or timing is still uncertain. It is intended to provide enough lead time so that those who need to set their plans in motion can do so. + +Water Vapor Satellite +Water vapor imagery is a satellite product which measures the amount of moisture in the atmosphere above 10,000 feet. Bright white areas indicate abundant moisture, which may be converted into clouds or precipitation. Darker areas indicate the presence of drier air. In addition to measuring moisture, water vapor imagery is useful in detecting large scale weather patterns, such as jet streams. + +Wave Dominant Period +The period in seconds between successive waves. + +Wave Height +The maximum reported or forecasted wave height. + +Wind Chill +The perceived decrease in air temperature felt by the body on exposed skin due to the flow of cold air. Wind chill temperature is defined only for temperatures at or below 10 °C (50 °F) and wind speeds above 4.8 kilometers per hour (3.0 mph). + +Wind Gust +A sudden, brief increase in speed of wind. According to US weather observing practice, gusts are reported when the peak wind speed reaches at least 16 knots and the variation in wind speed between the peaks and lulls is at least 9 knots. The duration of a gust is usually less than 20 seconds. + +""" + +WEATHER_AGENT_SYSTEM_PROMPT = """ + +You navigate through tasks efficiently. Whether you're learning something new or need assistance with daily tasks, I can provide information, suggestions, and step-by-step guidance. + +#### How I Can Help: +- **Information Retrieval:** I can fetch and summarize information on a wide range of topics. +- **Problem Solving:** I offer solutions and strategies to address specific challenges. +- **Learning Support:** I assist in understanding new concepts and procedures. + +#### Example: Using the Baron Weather API + +Let's look at how you can use the Baron Weather API to retrieve weather data, which involves making authenticated HTTP requests. + +1. **Understand Your Needs**: Identify what specific weather data you need, such as current conditions or a forecast. +2. **Gather API Details**: Know your API key, the endpoints available, and the data format (JSON). +3. **Authentication**: Learn how to authenticate your requests using your API key and additional security measures as required (like generating signatures). +4. **Craft the Request**: Construct the correct HTTP request to fetch the data you need. +5. **Parse the Response**: After making the request, interpret the JSON response to extract and utilize the weather data. + +Through each step, I can provide explanations, code snippets, and troubleshooting tips to ensure you successfully achieve your goal. + +### Conclusion + +With these steps, you'll be better prepared to use tools like APIs effectively and get the most out of our interactions. If you have questions or need further assistance, feel free to ask! + +--- + +""" + + +FEW_SHORT_PROMPTS = """ +What is the current temperature? allow the user to request the current temperature for their location user's location request_metar_nearest("38", "-96") +Describe the current weather. have the LLM construct a narrative weather description based on current conditions user's location request_metar_nearest("38", "-96") +How much rain fell at my location? 
allow the user to determine how much rain has accumulated at their location in the last 24 hours user's location point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4) +Is it going to be sunny tomorrow? allow the user to determine cloud coverage for their location user's location request_ndfd_basic(34.730301, -86.586098, forecast_time) +Is rain expected at my location in the next 6 hours? allow the user to determine if precip will fall in the coming hours user's location point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4) +What is the max forecasted temperature today? allow the user to determine how hot or cold the air temp will be user's location request_ndfd_basic(34.730301, -86.586098, forecast_time) +Will it be windy today? allow the user to determine the max wind speed for that day user's location point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4) +""" diff --git a/playground/weatherman_agent/weather_swarm/tools/__init__.py b/playground/weatherman_agent/weather_swarm/tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py b/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py new file mode 100644 index 00000000..fba0361b --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/tools/baron_tools_schema.py @@ -0,0 +1,145 @@ +from pydantic import BaseModel, Field + + +class RequestMetarNearest(BaseModel): + latitude: str = Field( + ..., + description=( + "The latitude of the location for which the nearest METAR" + " station is requested." + ), + ) + longitude: str = Field( + ..., + description=( + "The longitude of the location for which the nearest" + " METAR station is requested." + ), + ) + + +class PointQueryPrecipTotalAccum24Hr(BaseModel): + layer: str = Field( + ..., + description=( + "The layer of the precipitation total accumulation in the" + " last 24 hours." + ), + ) + projection: str = Field( + ..., + description=( + "The projection of the location for which the" + " precipitation total accumulation is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the" + " precipitation total accumulation is requested." + ), + ) + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the precipitation" + " total accumulation is requested." + ), + ) + + +class RequestNDFDBasic(BaseModel): + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the NDFD basic" + " forecast is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the NDFD basic" + " forecast is requested." + ), + ) + forecast_time: str = Field( + ..., + description=( + "The forecast time for which the NDFD basic forecast is" + " requested." + ), + ) + + +class PointQueryBaronHiresMaxReflectivityDbzAll(BaseModel): + layer: str = Field( + ..., + description=( + "The layer of the maximum reflectivity in dBZ for all" + " heights." + ), + ) + projection: str = Field( + ..., + description=( + "The projection of the location for which the maximum" + " reflectivity is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the maximum" + " reflectivity is requested." 
+ ), + ) + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the maximum" + " reflectivity is requested." + ), + ) + + +class PointQueryBaronHiresWindSpeedMph10Meter(BaseModel): + layer: str = Field( + ..., + description=( + "The layer of the wind speed in mph at 10 meters above" + " ground level." + ), + ) + projection: str = Field( + ..., + description=( + "The projection of the location for which the wind speed" + " is requested." + ), + ) + longitude: float = Field( + ..., + description=( + "The longitude of the location for which the wind speed" + " is requested." + ), + ) + latitude: float = Field( + ..., + description=( + "The latitude of the location for which the wind speed is" + " requested." + ), + ) + + +def _remove_a_key(d: dict, remove_key: str) -> None: + """Remove a key from a dictionary recursively""" + if isinstance(d, dict): + for key in list(d.keys()): + if key == remove_key and "type" in d.keys(): + del d[key] + else: + _remove_a_key(d[key], remove_key) diff --git a/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py b/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py new file mode 100644 index 00000000..a5d5f1ba --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/tools/get_geo_coordinates.py @@ -0,0 +1,109 @@ +import requests +from typing import List, Dict, Any + + +def fetch_geocode_by_city( + api_key: str, city: str, timestamp: int, signature: str +) -> List[Dict[str, Any]]: + """ + Fetch geocode data by city name. + + Args: + api_key (str): The API key for authentication. + city (str): The name of the city (e.g., "Austin, Tx"). + timestamp (int): The timestamp for the request. + signature (str): The signature for the request. + + Returns: + List[Dict[str, Any]]: Geocode data for the specified city. + + Raises: + Exception: If the request fails or the response is invalid. + """ + url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/city.json" + params = {"name": city, "ts": timestamp, "sig": signature} + try: + response = requests.get(url, params=params) + response.raise_for_status() + data = response.json() + return data.get("geocode", {}).get("data", []) + except requests.RequestException as e: + raise Exception(f"Failed to fetch geocode data by city: {e}") + except ValueError: + raise Exception("Invalid response format.") + + +def fetch_geocode_by_address( + api_key: str, address: str, timestamp: int, signature: str +) -> List[Dict[str, Any]]: + """ + Fetch geocode data by address. + + Args: + api_key (str): The API key for authentication. + address (str): The address (e.g., "3305 Northland Dr, Austin, Tx"). + timestamp (int): The timestamp for the request. + signature (str): The signature for the request. + + Returns: + List[Dict[str, Any]]: Geocode data for the specified address. + + Raises: + Exception: If the request fails or the response is invalid. 
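+
+ Example (illustrative only; the key, timestamp, and signature values are
+ placeholders that must be generated the same way as for the other geocode
+ helpers in this module):
+
+ results = fetch_geocode_by_address(
+ api_key="my_api_key",
+ address="3305 Northland Dr, Austin, Tx",
+ timestamp=1718000000,
+ signature="my_signature",
+ )
+ # `results` is the list found under geocode -> data in the JSON response.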
+ """ + url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/address.json" + params = {"location": address, "ts": timestamp, "sig": signature} + try: + response = requests.get(url, params=params) + response.raise_for_status() + data = response.json() + return data.get("geocode", {}).get("data", []) + except requests.RequestException as e: + raise Exception( + f"Failed to fetch geocode data by address: {e}" + ) + except ValueError: + raise Exception("Invalid response format.") + + +def fetch_geocode_by_zip( + api_key: str, + zip_code: str, + us: int, + timestamp: int, + signature: str, +) -> List[Dict[str, Any]]: + """ + Fetch geocode data by zip code. + + Args: + api_key (str): The API key for authentication. + zip_code (str): The zip code (e.g., "13060"). + us (int): Indicator for US zip code (1 for US, 0 for other). + timestamp (int): The timestamp for the request. + signature (str): The signature for the request. + + Returns: + List[Dict[str, Any]]: Geocode data for the specified zip code. + + Raises: + Exception: If the request fails or the response is invalid. + """ + url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/zip.json" + params = { + "zip": zip_code, + "us": us, + "ts": timestamp, + "sig": signature, + } + try: + response = requests.get(url, params=params) + response.raise_for_status() + data = response.json() + return data.get("geocode", {}).get("data", []) + except requests.RequestException as e: + raise Exception( + f"Failed to fetch geocode data by zip code: {e}" + ) + except ValueError: + raise Exception("Invalid response format.") diff --git a/playground/weatherman_agent/weather_swarm/tools/tools.py b/playground/weatherman_agent/weather_swarm/tools/tools.py new file mode 100644 index 00000000..68b20f44 --- /dev/null +++ b/playground/weatherman_agent/weather_swarm/tools/tools.py @@ -0,0 +1,1281 @@ +# coding: utf-8 + +import base64 +import hashlib +import hmac +import shutil +import time +from urllib.request import urlopen +from urllib.request import Request +from urllib.error import URLError +import os +import json +import codecs +from dotenv import load_dotenv +import datetime + +from typeguard import typechecked +from typing import Union + +load_dotenv() + +latin1 = codecs.lookup("latin-1") + +host = os.environ.get( + "BARON_API_HOST", "http://api.velocityweather.com/v1" +) +access_key = os.environ.get("BARON_ACCESS_KEY", "Y5lHXZfgce7P") +access_key_secret = os.environ.get( + "BARON_ACCESS_KEY_SECRET", + "rcscpInzyLuweENUjUtFDmqLkK1N0EPeaWQRjy7er1", +) + + +@typechecked +def a2w(a: bytes) -> str: + """ + Decodes a byte string using Latin-1 encoding and returns the first character of the decoded string. + + Args: + a (bytes): The byte string to be decoded. + + Returns: + str: The first character of the decoded string. + """ + return latin1.decode(a)[0] + + +@typechecked +def sig(key: str, secret: str) -> str: + """ + Generates a signed string using HMAC-SHA1 and base64 encoding. + + Args: + key (str): The key used for signing. + secret (str): The secret used for signing. + + Returns: + str: The signed string in the format "sig={signature}&ts={timestamp}". 
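+
+ Example (illustrative sketch, not real credentials; the key and secret
+ below are placeholders):
+
+ signature = sig("my_access_key", "my_access_key_secret")
+ # `signature` has the form "sig=<url-safe-base64-HMAC>&ts=<unix-seconds>"
+ # and is appended to request URLs by sign_request() below.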
+ """ + + ts = "{:.0f}".format(time.time()) + to_sign = key + ":" + ts + hashval = hmac.new( + secret.encode("utf-8"), to_sign.encode("utf-8"), hashlib.sha1 + ) + sig = a2w( + base64.urlsafe_b64encode(hashval.digest()).replace( + b"=", b"%3D" + ) + ) + return "sig={}&ts={}".format(sig, ts) + + +@typechecked +def sign_request(url: str, key: str, secret: str) -> str: + """ + Returns a signed URL by appending the signature and timestamp. + + Args: + url (str): The URL to be signed. + key (str): The key used for signing. + secret (str): The secret used for signing. + + Returns: + str: The signed URL with the signature and timestamp appended as query parameters. + """ + + """Returns signed url""" + + signature = sig(key, secret) + q = "?" if url.find("?") == -1 else "&" + url += "{}{}".format(q, signature) + return url + + +########## [START] API REQUESTS ########## +@typechecked +def request_pointquery_nws_watches_warning_all() -> str: + """ + Constructs a URL for querying all NWS watches and warnings for a specific point and signs the request. + + Returns: + str: The signed URL for the point query. + """ + + uri = "/reports/alert/all-poly/point.json?lat=29.70&lon=-80.41" + url = "%s/%s%s" % (host, access_key, uri) + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_lightning_count() -> str: + """ + Constructs a URL for querying the count of lightning strikes in a specified region and signs the request. + + Returns: + str: The signed URL for the lightning count query. + """ + + uri = "/reports/lightning/count/region.json?w_lon=-160&e_lon=0&n_lat=-2&s_lat=-70" + url = "%s/%s%s" % (host, access_key, uri) + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_storm_vector(sitecode: str) -> str: + """ + Constructs a URL for querying the storm vector for a specific site and signs the request. + + Args: + sitecode (str): The code of the site for which the storm vector is being queried. + + Returns: + str: The signed URL for the storm vector query. + """ + + uri = "/reports/stormvector/station/%s.json" % (sitecode) + url = "%s/%s%s" % (host, access_key, uri) + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_geocodeip() -> str: + """ + Constructs a URL for querying the geocode information of an IP address and signs the request. + + Returns: + str: The signed URL for the geocode IP query. + """ + + uri = "/reports/geocode/ipaddress.json" + url = "%s/%s%s" % (host, access_key, uri) + url = sign_request(url, access_key, access_key_secret) + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_forecast(lat: float, lon: float) -> dict: + """ + Constructs a URL for querying a 7-day point forecast for a specific latitude and longitude, signs the request, and retrieves the forecast data. + + Args: + lat (float): The latitude for the forecast query. + lon (float): The longitude for the forecast query. + + Returns: + dict: The forecast data for the specified point if the request is successful, otherwise an empty dictionary. 
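+
+ Example (hypothetical call; the coordinates are the Huntsville, AL values
+ used elsewhere in this module, and the dict contents depend on the Baron
+ API response):
+
+ forecast = request_forecast(34.730301, -86.586098)
+ # On success, `forecast` holds the "pointforecast_basic" data; on a URL
+ # or value error an empty dict is returned.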
+ """ + uri = "/reports/pointforecast/basic.json?days=7&lat={}&lon={}".format(lat, lon) + url = "%s/%s%s" % (host, access_key, uri) + url = sign_request(url, access_key, access_key_secret) + + try: + response = urlopen(url) + except URLError as e: + print(e) + return {} + except ValueError as e: + print(e) + return {} + + assert response.code == 200 + data = json.loads(response.read()) + + forecast_data = data.get("pointforecast_basic", {}).get("data", {}) + if isinstance(forecast_data, dict): + return forecast_data + else: + return {"forecast_data": forecast_data} + + +@typechecked +def request_metar_northamerica() -> None: + """ + Constructs a URL for querying METAR data for North America, signs the request, and retrieves the data. + Processes the METAR data and associated forecasts, then saves the data to a JSON file. + + Returns: + None + """ + + uri = "/reports/metar/region.json?n_lat=51.618017&s_lat=23.241346&w_lon=-129.375000&e_lon=-60.644531" + url = "%s/%s%s" % (host, access_key, uri) + url = sign_request(url, access_key, access_key_secret) + + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + + assert response.code == 200 + data = json.loads(response.read()) + + metars = {} + pages = data["metars"]["meta"]["pages"] + + print("processing {} pages of METAR data".format(pages)) + + for i in range(1, pages + 1): + print("processing page {}".format(i)) + page_url = url + "&page={}".format(i) + try: + response = urlopen(page_url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + + assert response.code == 200 + data = json.loads(response.read()) + for metar in data["metars"]["data"]: + siteid = metar["station"]["id"] + print("processing site {}".format(siteid)) + forecast = request_forecast( + metar["station"]["coordinates"][1], + metar["station"]["coordinates"][0], + ) + + metars[siteid] = {"metar": metar, "forecast": forecast} + + with open("metar.json", "w") as metar_jsonfile: + json.dump(metars, metar_jsonfile, indent=4, sort_keys=True) + + + +@typechecked +def request_metar_nearest(lat: str, lon: str): + """ + Requests the nearest METAR (Meteorological Aerodrome Report) data based on the given latitude and longitude. + + Args: + lat (str): The latitude of the location. + lon (str): The longitude of the location. + + Returns: + str: The signed request URL for retrieving the METAR data. + """ + uri = ( + "/reports/metar/nearest.json?lat=%s&lon=%s&within_radius=500&max_age=75" + % ( + lat, + lon, + ) + ) + url = "%s/%s%s" % (host, access_key, uri) + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_metar(station_id: str) -> str: + """ + Constructs a URL for querying METAR data for a specific station and signs the request. + + Args: + station_id (str): The ID of the station for which the METAR data is being queried. + + Returns: + str: The signed URL for the METAR query. + """ + + uri = "/reports/metar/station/%s.json" % station_id + url = "%s/%s%s" % (host, access_key, uri) + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_ndfd_hourly(lat: float, lon: float, utc_datetime: datetime.datetime) -> str: + """ + Requests NDFD hourly data for a specific latitude, longitude, and UTC datetime. + + Args: + lat (float): The latitude of the location. + lon (float): The longitude of the location. + utc_datetime (datetime.datetime): The UTC datetime for the request. 
+ + Returns: + str: The signed URL for the request. + """ + datetime_str = ( + utc_datetime.replace(microsecond=0).isoformat() + "Z" + ) + uri = f"/reports/ndfd/hourly.json?lat={lat}&lon={lon}&utc={datetime_str}" + url = f"{host}/{access_key}{uri}" + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_ndfd_basic(lat: float, lon: float, utc_datetime: datetime.datetime) -> str: + """ + Requests NDFD basic data for a specific latitude, longitude, and UTC datetime. + + Args: + lat (float): The latitude of the location. + lon (float): The longitude of the location. + utc_datetime (datetime.datetime): The UTC datetime for the request. + + Returns: + str: The signed URL for the request. + """ + + datetime_str = ( + utc_datetime.replace(microsecond=0).isoformat() + "Z" + ) + uri = f"/reports/ndfd/basic.json?lat={lat}&lon={lon}&utc={datetime_str}&days=7" + url = f"{host}/{access_key}{uri}" + return sign_request(url, access_key, access_key_secret) + + +@typechecked +def request_tile(product: str, product_config: str, z: int, x: int, y: int) -> None: + """ + Requests a tile for a specific product and configuration, retrieves the data, and saves it as a PNG file. + + Args: + product (str): The product name. + product_config (str): The product configuration. + z (int): The zoom level. + x (int): The tile's x coordinate. + y (int): The tile's y coordinate. + + Returns: + None + """ + + url = "%s/%s/meta/tiles/product-instances/%s/%s" % ( + host, + access_key, + product, + product_config, + ) + url = sign_request(url, access_key, access_key_secret) + + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + data = json.loads(response.read()) + + # Select the most recent product instance for this example. + product_instance = data[0] + + url = "%s/%s/tms/1.0.0/%s+%s+%s/%d/%d/%d.png" % ( + host, + access_key, + product, + product_config, + product_instance["time"], + z, + x, + y, + ) + + try: + # If it's a forecast product, it will have valid_times. The latest one is used for this example. + url += "?valid_time={}".format( + product_instance["valid_times"][0] + ) + except KeyError: + pass + + url = sign_request(url, access_key, access_key_secret) + print(url) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + print("headers:") + print( + json.dumps( + response.headers._headers, indent=4, sort_keys=True + ) + ) + + content = response.read() + filename = "./tms_img_{}_{}.png".format(product, product_config) + print( + "Read {} bytes, saving as {}".format(len(content), filename) + ) + with open(filename, "wb") as f: + f.write(content) + + +@typechecked +def point_query(product: str, product_config: str, lon: float, lat: float) -> None: + """ + Queries the most recent 'time' and, if applicable, 'valid_time' for a given product and product configuration at a specified longitude and latitude point. + + Args: + product (str): The product name. + product_config (str): The product configuration. + lon (float): The longitude of the location. + lat (float): The latitude of the location. + + Returns: + None + """ + # Get the list of product instances. 
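+ # point_query is a two-step call against the Baron API: the signed
+ # "product-instances" metadata request built below returns the available
+ # instance times for this product, and the most recent instance is then
+ # used to build the signed point query for the requested lon/lat.
+ # Example call (as used in the agent prompts):
+ # point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)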
+ url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format( + host=host, + key=access_key, + product=product, + product_config=product_config, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + data = json.loads(response.read()) + + # Select the most recent product instance for this example. + product_instance = data[0] + + # Query our lon, lat point. + url = "{host}/{key}/point/{product}/{product_config}/{product_instance}.{file_type}?lon={lon}&lat={lat}".format( + host=host, + key=access_key, + product=product, + product_config=product_config, + product_instance=product_instance["time"], + file_type="json", + lon=lon, + lat=lat, + ) + + try: + if product_instance["valid_times"][0]: + # If it's a forecast product, it will have valid_times. Display them all + url += "&valid_time=*" + + # If it's a forecast product, it will have valid_times. The latest one is used for this example. + # url += '&valid_time={}'.format(product_instance['valid_times'][0]) + + except KeyError: + pass + + url = sign_request(url, access_key, access_key_secret) + print(url) + + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + content = response.read() + charset = response.headers.get_param("charset") + if charset: + content = content.decode(charset) + content = json.loads(content) + + print("headers:") + print( + json.dumps( + response.headers._headers, indent=4, sort_keys=True + ) + ) + print("content:") + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def point_query_multi(product: str, product_config: str, points: 'list[tuple[float, float]]') -> None: + """ + For the given product and product_config, queries the most recent 'time' + (and most recent 'valid_time' if it's a forecast product) for a list of points. + + Args: + product (str): The product name. + product_config (str): The product configuration. + points (list[tuple[float, float]]): A list of tuples, each containing the longitude and latitude of a point. + + Returns: + None + """ + + """ + For the given product and product_config, queries the most recent 'time' + (and most recent 'valid_time' if it's a forecast product). + """ + + # Get the list of product instances. + url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format( + host=host, + key=access_key, + product=product, + product_config=product_config, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + data = json.loads(response.read()) + + # Select the most recent product instance for this example. + product_instance = data[0] + + def format_point(_p, _decimals=3): + return ",".join(str(round(_, _decimals)) for _ in _p) + + # Query our list of lon, lat points + url = "{host}/{key}/point/multi/{product}/{product_config}/{product_instance}.{file_type}?points={points}".format( + host=host, + key=access_key, + product=product, + product_config=product_config, + product_instance=product_instance["time"], + file_type="json", + points="|".join(format_point(_) for _ in points), + ) + + try: + # If it's a forecast product, it will have valid_times. 
The latest one is used for this example. + url += "&valid_time={}".format( + product_instance["valid_times"][0] + ) + except KeyError: + pass + + url = sign_request(url, access_key, access_key_secret) + print(url) + + try: + request = Request(url, headers={"Accept-Encoding": "gzip"}) + response = urlopen(request) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + if response.headers.get("Content-Encoding") == "gzip": + import gzip + import io + + compressed_file = io.BytesIO(response.read()) + decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode="rb") + content = decompressed_file.read() + else: + content = response.read() + + charset = response.headers.get_param("charset") + if charset: + content = content.decode(charset) + content = json.loads(content) + + print("headers:") + print( + json.dumps( + response.headers._headers, indent=4, sort_keys=True + ) + ) + print("content:") + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def point_query_region(product: str, product_config: str, n_lat: float, s_lat: float, w_lon: float, e_lon: float) -> None: + """ + For the given product and product_config, queries the most recent 'time' + (and most recent 'valid_time' if it's a forecast product) for a specific region. + + Args: + product (str): The product name. + product_config (str): The product configuration. + n_lat (float): The northern latitude of the region. + s_lat (float): The southern latitude of the region. + w_lon (float): The western longitude of the region. + e_lon (float): The eastern longitude of the region. + + Returns: + None + """ + + """ + For the given product and product_config, queries the most recent 'time' + (and most recent 'valid_time' if it's a forecast product). + """ + + # Get the list of product instances. + url = "{host}/{key}/meta/tiles/product-instances/{product}/{product_config}".format( + host=host, + key=access_key, + product=product, + product_config=product_config, + ) + + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + data = json.loads(response.read()) + + # Select the most recent product instance for this example. + product_instance = data[0] + + def format_value(_, _decimals=3): + return str(round(_, _decimals)) + + # Query our region + url = "{host}/{key}/point/region/{product}/{product_config}/{product_instance}.{file_type}?n_lat={n_lat}&s_lat={s_lat}&w_lon={w_lon}&e_lon={e_lon}".format( + host=host, + key=access_key, + product=product, + product_config=product_config, + product_instance=product_instance["time"], + file_type="json", + n_lat=format_value(n_lat), + s_lat=format_value(s_lat), + w_lon=format_value(w_lon), + e_lon=format_value(e_lon), + ) + + try: + # If it's a forecast product, it will have valid_times. The latest one is used for this example. 
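+ # Non-forecast products have no "valid_times" key; the KeyError from the
+ # lookup below is deliberately swallowed and no valid_time parameter is
+ # appended in that case.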
+ url += "&valid_time={}".format( + product_instance["valid_times"][0] + ) + except KeyError: + pass + + url = sign_request(url, access_key, access_key_secret) + print(url) + + try: + request = Request(url, headers={"Accept-Encoding": "gzip"}) + response = urlopen(request) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + if response.headers.get("Content-Encoding") == "gzip": + import gzip + import io + + compressed_file = io.BytesIO(response.read()) + decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode="rb") + content = decompressed_file.read() + else: + content = response.read() + + charset = response.headers.get_param("charset") + if charset: + content = content.decode(charset) + content = json.loads(content) + + print("headers:") + print( + json.dumps( + response.headers._headers, indent=4, sort_keys=True + ) + ) + print("content:") + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def request_wms_capabilities(product: str, product_config: str) -> None: + """ + Requests WMS capabilities for a specific product and product configuration, signs the request, and prints the response content. + + Args: + product (str): The product name. + product_config (str): The product configuration. + + Returns: + None + """ + + url = "{}/{}/wms/{}/{}?VERSION=1.3.0&SERVICE=WMS&REQUEST=GetCapabilities".format( + host, access_key, product, product_config + ) + url = sign_request(url, access_key, access_key_secret) + print(url) + + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + content = response.read() + print(content) + + +@typechecked +def request_wms(product: str, product_config: str, image_size_in_pixels: 'list[int]', image_bounds: 'list[float]') -> None: + """ + Requests a WMS image and saves it to disk in the current directory. + + Args: + product (str): The product code, such as 'C39-0x0302-0'. + product_config (str): The product configuration, such as 'Standard-Mercator' or 'Standard-Geodetic'. + image_size_in_pixels (list[int]): The image width and height in pixels, such as [1024, 1024]. + image_bounds (list[float]): The bounds of the image. See below for details depending on the projection. + + A. If requesting a Mercator (EPSG:3857) image: + 1. The coordinates must be in meters. + 2. The WMS 1.3.0 spec requires the coordinates be in this order [xmin, ymin, xmax, ymax]. + 3. As an example, to request the whole world, you would use [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]. + Because this projection stretches to infinity as you approach the poles, the ymin and ymax values + are clipped to the equivalent of -85.05112877980659 and 85.05112877980659 latitude, not -90 and 90 latitude, + resulting in a perfect square of projected meters. + B. If requesting a Geodetic (EPSG:4326) image: + 1. The coordinates must be in decimal degrees. + 2. The WMS 1.3.0 spec requires the coordinates be in this order [lat_min, lon_min, lat_max, lon_max]. + 3. As an example, to request the whole world, you would use [-90, -180, 90, 180]. + + Theoretically it is possible to request any arbitrary combination of image_size_in_pixels and image_bounds, + but this is not advisable and is actually discouraged. 
It is expected that the proportion you use for + image_width_in_pixels/image_height_in_pixels is equal to image_width_bounds/image_height_bounds. If this is + not the case, you have most likely done some incorrect calculations. It will result in a distorted (stretched + or squished) image that is incorrect for the requested projection. One fairly obvious sign that your + proportions don't match up correctly is that the image you receive from your WMS request will have no + smoothing (interpolation), resulting in jaggy or pixelated data. + + Returns: + None + """ + # Convert the image bounds to a comma-separated string. + image_bounds_str = ",".join(str(x) for x in image_bounds) + + # We're using the TMS-style product instances API here for simplicity. If you + # are using a standards-compliant WMS client, do note that we also provide a + # WMS-style API to retrieve product instances which may be more suitable to your + # needs. See our documentation for details. + + # For this example, we use the optional parameter "page_size" to limit the + # list of product instances to the most recent instance. + meta_url = ( + "{}/{}/meta/tiles/product-instances/{}/{}?page_size=1".format( + host, access_key, product, product_config + ) + ) + meta_url = sign_request(meta_url, access_key, access_key_secret) + + try: + response = urlopen(meta_url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + # Decode the product instance response and get the most recent product instance time, + # to be used in the WMS image request. + content = json.loads(response.read()) + product_instance = content[0] + + # WMS uses EPSG codes, while our product configuration code uses 'Geodetic' or + # 'Mercator'. We map between the two here to prepare for the WMS CRS query parameter. + epsg_code = ( + "EPSG:4326" + if product_config.endswith("-Geodetic") + else "EPSG:3857" + ) + + wms_url = "{}/{}/wms/{}/{}?VERSION=1.3.0&SERVICE=WMS&REQUEST=GetMap&CRS={}&LAYERS={}&BBOX={}&WIDTH={}&HEIGHT={}".format( + host, + access_key, + product, + product_config, + epsg_code, + product_instance["time"], + image_bounds_str, + image_size_in_pixels[0], + image_size_in_pixels[1], + ) + + try: + # If it's a forecast product, it will have valid_times. The latest one is used for this example. + wms_url += "&TIME={}".format( + product_instance["valid_times"][0] + ) + except KeyError: + pass + + wms_url = sign_request(wms_url, access_key, access_key_secret) + print(wms_url) + + try: + response = urlopen(wms_url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + content = response.read() + filename = "./wms_img_{}_{}.png".format(product, product_config) + print( + "Read {} bytes, saving as {}".format(len(content), filename) + ) + with open(filename, "wb") as f: + f.write(content) + + + +@typechecked +def request_geotiff(product: str, product_config: str, product_instance: str = "") -> 'tuple[str, dict]': + """ + Requests a GeoTIFF image for a specific product, product configuration, and product instance. + If no product instance is provided, the most recent instance is used. + + Args: + product (str): The product code. + product_config (str): The product configuration. + product_instance (str, optional): The product instance time. Defaults to an empty string. + + Returns: + tuple[str, dict]: The filename where the GeoTIFF is saved and a dictionary of valid times. 
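+        If a request fails, the error is printed and None is returned instead of a tuple.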
+ """ + + if not product_instance: + # For this example, we use the optional parameter "page_size" to limit the + # list of product instances to the most recent instance. + meta_url = "{}/{}/meta/tiles/product-instances/{}/{}?page_size=1".format( + host, access_key, product, product_config + ) + meta_url = sign_request( + meta_url, access_key, access_key_secret + ) + + try: + response = urlopen(meta_url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + # Decode the product instance response and get the most recent product instance time, + # to be used in the geotiff request. + content = json.loads(response.read()) + product_instance = content[0]["time"] + + url = "/".join( + [ + host, + access_key, + "geotiff", + product, + product_config, + product_instance, + ] + ) + url = sign_request(url, access_key, access_key_secret) + print(url) + + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + content = json.loads(response.read()) + url = content["source"] + + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + filename = "./{}.tif".format( + "_".join([product, product_config, product_instance]) + ) + with open(filename, "wb") as f: + # The geotiffs can be very large, so we don't want to read the + # http body entirely into memory before writing -- copy it directly + # to a file instead. + shutil.copyfileobj(response, f) + return filename, content.get("valid_times", {}) + + +@typechecked +def bgfs_basic(lon: float, lat: float, date: Union[datetime.date, datetime.datetime], days: int = 1) -> None: + """ + Requests BGFS basic data for a specific longitude, latitude, date, and number of days. + + Args: + lon (float): The longitude of the location. + lat (float): The latitude of the location. + date (datetime.datetime): The date for the request. + days (int, optional): The number of days for the request. Defaults to 1. + + Returns: + None + """ + + url = "{host}/{key}/reports/bgfs/basic?lon={lon}&lat={lat}&utc={utc}&days={days}".format( + host=host, + key=access_key, + lon=lon, + lat=lat, + utc=date.strftime("%Y-%m-%d"), + days=days, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + content = json.loads(response.read()) + + # Convert back to json only so we can let the json library format the + # response for pretty display. + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def bgfs_extended(lon: float, lat: float, date: Union[datetime.date, datetime.datetime], days: int = 1) -> None: + + """ + Fetches extended weather reports using the BGFS API. + + Args: + lon (float): The longitude of the location. + lat (float): The latitude of the location. + date (datetime.datetime): The date for which the weather reports are requested. + days (int, optional): The number of days for which the weather reports are requested. Defaults to 1. + + Returns: + None + + Raises: + URLError: If there is an error in the URL request. + ValueError: If there is an error in the URL parameters. 
+ """ + + url = "{host}/{key}/reports/bgfs/extended?lon={lon}&lat={lat}&utc={utc}&days={days}".format( + host=host, + key=access_key, + lon=lon, + lat=lat, + utc=date.strftime("%Y-%m-%d"), + days=days, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + content = json.loads(response.read()) + + # Convert back to json only so we can let the json library format the + # response for pretty display. + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +@typechecked +def bgfs_hourly(lon: float, lat: float, date_hour: Union[datetime.date, datetime.datetime], hours: int = 1) -> None: + """ + Fetches hourly weather reports from the BGFS API for the given longitude, latitude, and date hour. + + Args: + lon (float): The longitude of the location. + lat (float): The latitude of the location. + date_hour (datetime.datetime): The date and hour for which to fetch the weather reports. + hours (int, optional): The number of hours of weather reports to fetch. Defaults to 1. + + Returns: + None + + Raises: + URLError: If there is an error in the URL request. + ValueError: If there is an error in the URL parameters. + """ + + url = "{host}/{key}/reports/bgfs/hourly?lon={lon}&lat={lat}&utc={utc}&hours={hours}".format( + host=host, + key=access_key, + lon=lon, + lat=lat, + utc=date_hour.strftime("%Y-%m-%dT%H:%M:%SZ"), + hours=hours, + ) + url = sign_request(url, access_key, access_key_secret) + try: + response = urlopen(url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + content = json.loads(response.read()) + + # Convert back to json only so we can let the json library format the + # response for pretty display. + print( + json.dumps( + content, indent=4, sort_keys=True, ensure_ascii=False + ) + ) + + +from typing import Iterator, Dict +@typechecked +def iter_product_instances(product: str, product_config: str, request_limit: int = 100) -> Iterator[Dict]: + """ + Iterate over all available product instances, one by one, using a + configurable number of instances per request. + + Args: + product (str): The product code. + product_config (str): The product configuration. + request_limit (int, optional): The number of instances to request per API call. Defaults to 100. + + Yields: + dict: A product instance. + + Returns: + None + """ + + url_template = ( + "{}/{}/meta/tiles/product-instances/{}/{}?limit={}".format( + host, access_key, product, product_config, request_limit + ) + ) + url = url_template + + request_count = 0 + content_count = 0 + while content_count < request_limit: + signed_url = sign_request(url, access_key, access_key_secret) + request_count += 1 + try: + response = urlopen(signed_url) + except URLError as e: + print(e) + return + except ValueError as e: + print(e) + return + assert response.code == 200 + + content = json.loads(response.read()) + for item in content: + yield item + + content_count += len(content) + + if len(content) < request_limit: + # We didn't get a full page, so we must be on the last page and + # therefore -- finished. + print( + "Request count: {}. 
Instance count: {}.".format(
+                    request_count,
+                    (request_count - 1) * request_limit
+                    + len(content),
+                )
+            )
+            return
+        url = "{}&older_than={}".format(
+            url_template, content[-1]["time"]
+        )
+
+
+def test_api_calls():
+    url = request_metar_nearest("38", "-96")
+    print("*** request METAR nearest ***")
+    print(url)
+    print(urlopen(url).read())
+    print("")
+
+    point_query(
+        "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4
+    )
+
+    forecast_time = datetime.datetime.utcnow()
+    url = request_ndfd_basic(34.730301, -86.586098, forecast_time)
+    print("*** request NDFD basic ***")
+    print(url)
+    print(urlopen(url).read())
+    print("")
+
+    # /point/baron-hires-temp-f-2meter/Standard-Mercator/2024-05-02T12%3A00%3A00Z.jsonp?callback=_jqjsp&lat=30.173624550358536&lon=-95.3009033203125&ts=1714685100&sig=IOUh5xEZzyRqzT1MQctn1vxSqXM=&valid_time=*
+    point_query(
+        "baron-hires-maxreflectivity-dbz-all",
+        "Mask1-Mercator",
+        -86.6,
+        34.4,
+    )
+
+    point_query(
+        "baron-hires-windspeed-mph-10meter",
+        "Standard-Mercator",
+        -86.6,
+        34.4,
+    )
+
+    # Get all product instances for a product.
+    for i, instance in enumerate(iter_product_instances('C39-0x0302-0', 'Standard-Mercator')):
+        print(type(instance))
+        print('{:>3} {}'.format(i, instance['time']))
+
+    # Or, alternatively, get the product instances using a wms-style request.
+    request_wms_capabilities('C39-0x0302-0', 'Standard-Mercator')
+
+    # Request the whole world in the EPSG:4326 projection. Note that the proportions for
+    # the image size in pixels and the image bounds are identical (2:1).
+    request_wms('C39-0x0302-0', 'Standard-Geodetic', [2048, 1024], [-90.0, -180.0, 90.0, 180.0])
+
+    # Request the whole world in the EPSG:3857 projection. Note that the proportions for
+    # the image size in pixels and the image bounds are identical (1:1).
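+    # Keeping the pixel aspect ratio equal to the bounds aspect ratio avoids the
+    # stretched or squished output warned about in the request_wms docstring.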
+    request_wms('C39-0x0302-0', 'Standard-Mercator', [2048, 2048], [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244])
+
+    filename, valid_times = request_geotiff('C39-0x0302-0', 'Standard-Mercator')
+
+    print("*** request point query ***")
+    point_query('C09-0x0331-0', 'Standard-Mercator', -86, 34)
+    print("")
+
+    # print("*** requesting METARS and Forecasts for North America ***")
+    # request_metar_northamerica()
+    # print("")
+
+    url = request_metar("egll")
+    print("*** request METAR ***")
+    print(url)
+    print(urlopen(url).read())
+    print("")
+
+    forecast_time = datetime.datetime.utcnow() + datetime.timedelta(hours=4)
+    url = request_ndfd_hourly(34.730301, -86.586098, forecast_time)
+    print("*** request NDFD hourly ***")
+    print(url)
+    print(urlopen(url).read())
+    print("")
+
+    request_tile("C39-0x0302-0", "Standard-Mercator", 1, 0, 1)
+    url = request_storm_vector("mhx")
+    print("*** request storm vectors ***")
+    print(url)
+    response = urlopen(url)
+    print('JSON for storm vectors is %d bytes' % len(response.read()))
+    print("")
+    url = request_geocodeip()
+    print("*** geocode IP address ***")
+    print(url)
+    print(urlopen(url).read())
+    print("")
+    url = request_lightning_count()
+    print("*** lightning count ***")
+    print(url)
+    print(urlopen(url).read())
+    print("")
+
+    date = datetime.datetime.now().date() + datetime.timedelta(days=1)
+    bgfs_basic(-86.6, 34.4, date, 1)
+    bgfs_extended(-86.6, 34.4, date, 1)
+    bgfs_hourly(-86.6, 34.4, datetime.datetime.combine(date, datetime.time(hour=6)), 1)
+    print("")
+
+    point_query('C09-0x0331-0', 'Standard-Mercator', -86.6, 34.4)
+    point_query_multi('C09-0x0331-0', 'Standard-Mercator', [(-86.6, 34.4), (-90.14, 38)])
+    point_query_region('C09-0x0331-0', 'Standard-Mercator', 34.4, 34.1, -86.6, -86.5)
+
+
+# if __name__ == "__main__":
+#     main()
diff --git a/swarms/structs/concat.py b/swarms/structs/concat.py
new file mode 100644
index 00000000..6ed2c608
--- /dev/null
+++ b/swarms/structs/concat.py
@@ -0,0 +1,24 @@
+from typing import List
+
+
+def concat_strings(string_list: List[str]) -> str:
+    """
+    Concatenates a list of strings into a single string.
+
+    Args:
+        string_list (List[str]): A list of strings to be concatenated.
+
+    Returns:
+        str: The concatenated string.
+
+    Raises:
+        TypeError: If the input is not a list of strings.
+
+    """
+    if not isinstance(string_list, list):
+        raise TypeError("Input must be a list of strings.")
+
+    try:
+        return "".join(string_list)
+    except TypeError:
+        raise TypeError("All elements in the list must be strings.")
diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py
new file mode 100644
index 00000000..bdca22e5
--- /dev/null
+++ b/swarms/structs/mixture_of_agents.py
@@ -0,0 +1,159 @@
+from swarms.structs.agent import Agent
+from swarms.structs.base_swarm import BaseSwarm
+from typing import List, Any
+
+from swarms.structs.conversation import Conversation
+from pydantic import BaseModel
+from swarms.utils.loguru_logger import logger
+
+
+class AgentRun(BaseModel):
+    agent_name: str
+    output: Any
+
+
+class Metadata(BaseModel):
+    layers: int
+    agent_runs: List[AgentRun]
+    final_output: Any
+
+
+class MixtureOfAgents(BaseSwarm):
+    """
+    Represents a mixture of agents in a swarm.
+    The process is parallel -> sequential -> parallel -> final output agent.
+    From the paper: https://arxiv.org/pdf/2406.04692
+
+    Attributes:
+        agents (List[Agent]): The list of agents in the swarm.
+        name (str): The name of the swarm.
+        description (str): A short description of the swarm.
+        final_agent (Agent): The agent that aggregates the full conversation into the final output.
+ max_loops (int): The maximum number of loops to run. + verbose (bool): Flag indicating whether to print verbose output. + layers (int, optional): The number of layers in the swarm. Defaults to None. + rules (str, optional): The rules for the swarm. Defaults to None. + """ + + def __init__( + self, + name: str = "MixtureOfAgents", + description: str = "A swarm of agents that run in parallel and sequentially.", + agents: List[Agent] = None, + max_loops: int = 1, + verbose: bool = True, + layers: int = None, + rules: str = None, + final_agent: Agent = None, + auto_save: bool = False, + saved_file_name: str = "moe_swarm.json", + ): + self.name = name + self.description = description + self.agents = agents + self.max_loops = max_loops + self.verbose = verbose + self.layers = layers + self.rules = rules + self.final_agent = final_agent + self.auto_save = auto_save + self.saved_file_name = saved_file_name + + # Check the agents + self.agent_check() + self.final_agent_check() + + # Conversation + self.conversation = Conversation( + time_enabled=True, + rules=rules, + ) + + # Initialize the swarm + self.swarm_initialization() + + def agent_check(self): + if not isinstance(self.agents, list): + raise TypeError("Input must be a list of agents.") + for agent in self.agents: + if not isinstance(agent, Agent): + raise TypeError( + "Input must be a list of agents." + "Each agent must be an instance of Agent." + ) + + def final_agent_check(self): + if not isinstance(self.final_agent, Agent): + raise TypeError("Final agent must be an instance of Agent.") + + def swarm_initialization(self): + # Name, description, and logger + logger.info(f"Initializing swarm {self.name}.") + logger.info(f"Description: {self.description}") + logger.info(f"Initializing swarm with {len(self.agents)} agents.") + + def run(self, task: str = None, *args, **kwargs): + try: + # Running the swarm + logger.info(f"Running swarm {self.name}.") + + self.conversation.add("user", task) + + # Conversation history + history = self.conversation.return_history_as_string() + + agent_runs = [] + layer = 0 + while layer < self.layers: + logger.info(f"Running layer {layer} of the swarm.") + # Different Layers + # Run the agents for all agents on the input + responses = [] + for agent in self.agents: + out = agent.run(history, *args, **kwargs) + responses.append((agent.agent_name, out)) + agent_runs.append( + AgentRun(agent_name=agent.agent_name, output=out) + ) + + # Log the agent run + logger.info(f"Agent {agent.agent_name} output: {out}") + + # Add all the responses to the conversation + logger.info("Adding responses to the conversation.") + for agent_name, response in responses: + self.conversation.add(agent_name, response) + + # Update the history + history = self.conversation.return_history_as_string() + + layer += 1 + + logger.info(f"Completed layer {layer} of the swarm.") + + # Run the final output agent on the entire conversation history + logger.info( + "Running the final output agent on the conversation history." 
+            )
+            final_output = self.final_agent.run(history, *args, **kwargs)
+            self.conversation.add(
+                self.final_agent.agent_name, final_output
+            )
+
+            # Create metadata
+            logger.info("Creating metadata for the swarm.")
+            metadata = Metadata(
+                layers=self.layers,
+                agent_runs=agent_runs,
+                final_output=final_output,
+            )
+
+            # Save metadata to a JSON file, but only when auto_save is enabled.
+            if self.auto_save:
+                logger.info("Saving metadata to JSON file.")
+                with open(self.saved_file_name, "w") as f:
+                    f.write(metadata.json())
+
+            return self.conversation.return_history_as_string()
+        except Exception as e:
+            logger.error(
+                f"Error running swarm: {e}. Try optimizing the swarm inputs or re-iterating on the task."
+            )
+            return None
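Below is a minimal usage sketch for the new MixtureOfAgents class (illustrative only, not part of the patch; the OpenAIChat model, agent names, and task string are assumptions):

    from swarms import Agent, OpenAIChat
    from swarms.structs.mixture_of_agents import MixtureOfAgents

    llm = OpenAIChat()  # assumes OPENAI_API_KEY is set in the environment

    writer = Agent(agent_name="Writer", llm=llm, max_loops=1)
    critic = Agent(agent_name="Critic", llm=llm, max_loops=1)
    aggregator = Agent(agent_name="Aggregator", llm=llm, max_loops=1)

    swarm = MixtureOfAgents(
        agents=[writer, critic],
        final_agent=aggregator,
        layers=2,  # layers must be set explicitly; the default of None cannot drive the loop
    )

    # Each layer runs every agent over the shared conversation history,
    # then final_agent aggregates the full history into one answer.
    print(swarm.run("Draft and refine a short weather briefing for Huntsville, AL."))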