parent
d011ed8d44
commit
f2ac193e3f
@ -0,0 +1,15 @@
|
||||
# Multi-Agent Orchestration

Swarms was designed to facilitate communication between many different specialized agents drawn from a vast array of other frameworks, such as LangChain, AutoGen, CrewAI, and more.

In traditional swarm theory there are many types of swarms, each suited to a specialized use case or problem set. Hierarchical and sequential swarms, for example, work well for accounting and sales, because a coordinator ("boss") agent distributes the workload to other specialized agents. A minimal sequential example appears after the table below.
|
||||
|
||||
|
||||
| **Name** | **Description** | **Code Link** | **Use Cases** |
|
||||
|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------|---------------------------------------------------------------------------------------------------|
|
||||
| Hierarchical Swarms | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. | [Code Link](#) | Manufacturing process optimization, multi-level sales management, healthcare resource coordination |
|
||||
| Agent Rearrange | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing |
|
||||
| Concurrent Workflows | Agents perform different tasks simultaneously, coordinating to complete a larger goal. | [Code Link](#) | Concurrent production lines, parallel sales operations, simultaneous patient care processes |
|
||||
| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows |
|
||||
| Parallel Processing | Agents work on different parts of a task simultaneously to speed up the overall process. | [Code Link](#) | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests |
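
The sketch below shows what the sequential pattern looks like in practice. It is a minimal sketch, assuming the `SequentialWorkflow` construct linked in the table; the agent names and prompts are illustrative, not part of the framework.

```python
from swarms import Agent, OpenAIChat, SequentialWorkflow

llm = OpenAIChat()

# A coordinator-style pipeline: the researcher's output feeds the writer.
researcher = Agent(
    agent_name="Researcher",
    system_prompt="Gather the key facts for the given task.",
    llm=llm,
    max_loops=1,
)
writer = Agent(
    agent_name="Writer",
    system_prompt="Turn the gathered facts into a short report.",
    llm=llm,
    max_loops=1,
)

# Each agent runs in order; completion of one task triggers the next.
workflow = SequentialWorkflow(agents=[researcher, writer], max_loops=1)
print(workflow.run("Summarize this quarter's sales figures."))
```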
|
||||
|
||||
|
@ -0,0 +1,5 @@
|
||||
ANTHROPIC_API_KEY="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"
|
||||
SWARMS_API_KEY="GET YOUR KEY AT https://swarms.world/account"
|
||||
BARON_API_HOST="http://api.velocityweather.com/v1"
|
||||
BARON_ACCESS_KEY="Y5lHXZfgce7P"
|
||||
BARON_ACCESS_KEY_SECRET="rcscpInzyLuweENUjUtFDmqLkK1N0EPeaWQRjy7er1"]
|
@ -0,0 +1,204 @@
|
||||
|
||||
.venv/
|
||||
|
||||
|
||||
|
||||
image/
|
||||
audio/
|
||||
video/
|
||||
dataframe/
|
||||
|
||||
static/generated
|
||||
runs
|
||||
chroma
|
||||
Weather Director Agent_state.json
|
||||
Unit Testing Agent_state.json
|
||||
Devin_state.json
|
||||
swarms/__pycache__
|
||||
artifacts
|
||||
transcript_generator.json
|
||||
venv
|
||||
.DS_Store
|
||||
Cargo.lock
|
||||
.DS_STORE
|
||||
|
||||
swarms/agents/.DS_Store
|
||||
artifacts_two
|
||||
logs
|
||||
_build
|
||||
conversation.txt
|
||||
t1_state.json
|
||||
stderr_log.txt
|
||||
t2_state.json
|
||||
.vscode
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
Transcript Generator_state.json
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
.grit
|
||||
swarm-worker-01_state.json
|
||||
error.txt
|
||||
Devin Worker 2_state.json
|
||||
# C extensions
|
||||
*.so
|
||||
.ruff_cache
|
||||
|
||||
|
||||
errors.txt
|
||||
|
||||
Autonomous-Agent-XYZ1B_state.json
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
.vscode/settings.json
|
@ -0,0 +1,112 @@
|
||||
# Baron Weather
|
||||
|
||||
## Overview
|
||||
Baron Weather is a sophisticated toolset designed to enable real-time querying of weather data using the Baron API. It utilizes a swarm of autonomous agents to handle concurrent data requests, optimizing for efficiency and accuracy in weather data retrieval and analysis.
|
||||
|
||||
## Features
|
||||
Baron Weather includes the following key features:
|
||||
- **Real-time Weather Data Access**: Instantly fetch and analyze weather conditions using the Baron API.
|
||||
- **Autonomous Agents**: A swarm system for handling multiple concurrent API queries efficiently.
|
||||
- **Data Visualization**: Tools for visualizing complex meteorological data for easier interpretation.
|
||||
|
||||
|
||||
## Prerequisites
|
||||
Before you begin, ensure you have met the following requirements:
|
||||
- Python 3.10 or newer
|
||||
- git installed on your machine
|
||||
- The Python packages listed in `requirements.txt` (including `swarms`)
|
||||
|
||||
## Installation
|
||||
|
||||
There are two installation methods: clone the repository if you want to modify the codebase, or install via pip for simple usage:
|
||||
|
||||
### Pip
|
||||
`pip3 install -U weather-swarm`
|
||||
|
||||
### Cloning the Repository
|
||||
To get started with Baron Weather, clone the repository to your local machine using:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/baronservices/weatherman_agent.git
|
||||
cd weatherman_agent
|
||||
```
|
||||
|
||||
### Setting Up the Environment
|
||||
Create a Python virtual environment to manage dependencies:
|
||||
|
||||
```bash
|
||||
python -m venv venv
|
||||
source venv/bin/activate # On Windows use `venv\Scripts\activate`
|
||||
```
|
||||
|
||||
### Installing Dependencies
|
||||
Install the necessary Python packages via pip:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Usage
|
||||
To start querying the Baron Weather API using the autonomous agents, run:
|
||||
|
||||
```bash
|
||||
python main.py
|
||||
```
|
||||
|
||||
## API

To launch the FastAPI server that exposes the WeatherMan agent over HTTP, run:
|
||||
|
||||
```bash
|
||||
python3 api.py
|
||||
```
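
With the server running (it listens on port 8000 by default, per `api.py`), you can exercise the chat endpoint. The request body below mirrors the `ChatRequest` schema; the prompt is only an example.

```bash
curl -X POST http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "WeatherMan Agent", "prompt": "Will it rain today in Huntsville?", "max_tokens": 100, "temperature": 1.0}'
```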
|
||||
|
||||
|
||||
### Llama3
|
||||
|
||||
```python
|
||||
from swarms import llama3Hosted
|
||||
|
||||
|
||||
# Example usage
|
||||
llama3 = llama3Hosted(
|
||||
model="meta-llama/Meta-Llama-3-8B-Instruct",
|
||||
temperature=0.8,
|
||||
max_tokens=1000,
|
||||
system_prompt="You are a helpful assistant.",
|
||||
)
|
||||
|
||||
completion_generator = llama3.run(
|
||||
"create an essay on how to bake chicken"
|
||||
)
|
||||
|
||||
print(completion_generator)
|
||||
|
||||
```
|
||||
|
||||
## Documentation

- [llama3Hosted](docs/llama3_hosted.md)
|
||||
|
||||
## Contributing
|
||||
Contributions to Baron Weather are welcome and appreciated. Here's how you can contribute:
|
||||
|
||||
1. Fork the Project
|
||||
2. Create your Feature Branch (`git checkout -b feature/YourAmazingFeature`)
|
||||
3. Commit your Changes (`git commit -m 'Add some YourAmazingFeature'`)
|
||||
4. Push to the Branch (`git push origin feature/YourAmazingFeature`)
|
||||
5. Open a Pull Request
|
||||
|
||||
|
||||
## Tests
|
||||
To run the test suite, run:
|
||||
|
||||
`pytest`
|
||||
|
||||
## Contact
|
||||
Project Maintainer - [Kye Gomez](mailto:kye@swarms.world) - [GitHub Profile](https://github.com/baronservices)
|
||||
|
||||
|
||||
## Todo

- [ ] Add the schemas to the worker agents so they output JSON
- [ ] Implement the parser and the function-calling mapping to execute the functions
- [ ] Implement the hierarchical swarm and plug in all the agents
- [ ] Implement the API server wrapping the hierarchical swarm
- [ ] Deploy on a server to run 24/7
|
@ -0,0 +1,119 @@
|
||||
import os
import time
import uuid
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import FastAPI, HTTPException
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from pydantic import BaseModel
|
||||
from swarms import Agent, OpenAIChat
|
||||
from swarms.utils.loguru_logger import logger
|
||||
|
||||
from weather_swarm.prompts import (
|
||||
FEW_SHORT_PROMPTS,
|
||||
GLOSSARY_PROMPTS,
|
||||
WEATHER_AGENT_SYSTEM_PROMPT,
|
||||
)
|
||||
from weather_swarm.tools.tools import (
|
||||
point_query,
|
||||
request_ndfd_basic,
|
||||
request_ndfd_hourly,
|
||||
)
|
||||
|
||||
load_dotenv()
|
||||
|
||||
logger.info("Starting the API server..")
|
||||
app = FastAPI(debug=True)
|
||||
|
||||
# Load the middleware to handle CORS
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=["*"],
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
|
||||
class ChatRequest(BaseModel):
|
||||
model: str
|
||||
prompt: str
|
||||
max_tokens: int = 100
|
||||
temperature: float = 1.0
|
||||
|
||||
|
||||
class ChatResponse(BaseModel):
|
||||
id: str
|
||||
object: str
|
||||
created: int
|
||||
model: str
|
||||
choices: List[Dict[str, Any]]
|
||||
usage: Dict[str, Any]
|
||||
|
||||
|
||||
@app.get("/v1/health")
|
||||
async def health_check():
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
@app.get("/v1/models")
|
||||
async def get_models():
|
||||
return {"models": ["WeatherMan Agent"]}
|
||||
|
||||
|
||||
@app.post("/v1/chat/completions", response_model=ChatResponse)
|
||||
async def chat_completions(request: ChatRequest):
|
||||
if request.model != "WeatherMan Agent":
|
||||
raise HTTPException(status_code=400, detail="Model not found")
|
||||
|
||||
# Initialize the WeatherMan Agent
|
||||
agent = Agent(
|
||||
agent_name="WeatherMan Agent",
|
||||
system_prompt=WEATHER_AGENT_SYSTEM_PROMPT,
|
||||
sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS],
|
||||
llm=OpenAIChat(
|
||||
openai_api_key=os.getenv("OPENAI_API_KEY"),
|
||||
max_tokens=request.max_tokens,
|
||||
temperature=request.temperature,
|
||||
),
|
||||
max_loops=1,
|
||||
# dynamic_temperature_enabled=True,
|
||||
# verbose=True,
|
||||
output_type=str,
|
||||
metadata_output_type="json",
|
||||
function_calling_format_type="OpenAI",
|
||||
function_calling_type="json",
|
||||
tools=[point_query, request_ndfd_basic, request_ndfd_hourly],
|
||||
)
|
||||
|
||||
# Response from the agent
|
||||
|
||||
try:
|
||||
response = agent.run(request.prompt)
|
||||
return {
|
||||
"id": uuid.uuid4(),
|
||||
"object": "text_completion",
|
||||
"created": int(os.times().system),
|
||||
"model": agent.agent_name,
|
||||
"choices": [{"text": response}],
|
||||
"usage": {
|
||||
"prompt_tokens": len(request.prompt.split()),
|
||||
"completion_tokens": len(response.split()),
|
||||
"total_tokens": len(request.prompt.split())
|
||||
+ len(response.split()),
|
||||
},
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
# Example of how to run the FastAPI app
|
||||
def deploy_app(host: str = "0.0.0.0", port: int = 8000):
|
||||
import uvicorn
|
||||
|
||||
uvicorn.run(app, host=host, port=port)
|
||||
|
||||
|
||||
# Run the FastAPI app
|
||||
if __name__ == "__main__":
|
||||
deploy_app()
|
|
|
|
@ -0,0 +1,112 @@
|
||||
# llama3Hosted Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
The `llama3Hosted` class is a high-level interface for interacting with a hosted version of the Llama3 model. This class is designed to simplify the process of generating responses from the Llama3 model by providing an easy-to-use interface for sending requests and receiving responses. The Llama3 model is a state-of-the-art language model developed by Meta, known for its ability to generate human-like text based on the input it receives.
|
||||
|
||||
### Key Features
|
||||
|
||||
- **Model Customization**: Allows the user to specify which version of the Llama3 model to use.
|
||||
- **Temperature Control**: Adjusts the randomness of the generated responses.
|
||||
- **Token Limitation**: Sets a limit on the maximum number of tokens in the generated response.
|
||||
- **System Prompt**: Defines the initial context for the conversation, guiding the model's responses.
|
||||
|
||||
## Purpose
|
||||
|
||||
The `llama3Hosted` class is designed to provide developers with a straightforward way to utilize the capabilities of the Llama3 model without dealing with the complexities of model hosting and API management. It is particularly useful for applications that require natural language understanding and generation, such as chatbots, virtual assistants, and content generation tools.
|
||||
|
||||
## Class Definition
|
||||
|
||||
### llama3Hosted Parameters
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|----------------|--------|-----------------------------------------|--------------------------------------------------------------|
|
||||
| `model` | `str` | `"meta-llama/Meta-Llama-3-8B-Instruct"` | The name or path of the Llama3 model to use. |
|
||||
| `temperature` | `float`| `0.8` | The temperature parameter for generating responses. |
|
||||
| `max_tokens` | `int` | `4000` | The maximum number of tokens in the generated response. |
|
||||
| `system_prompt`| `str` | `"You are a helpful assistant."` | The system prompt to use for generating responses. |
|
||||
| `*args` | | | Variable length argument list. |
|
||||
| `**kwargs` | | | Arbitrary keyword arguments. |
|
||||
|
||||
### Attributes
|
||||
|
||||
| Attribute | Type | Description |
|
||||
|----------------|--------|--------------------------------------------------------------|
|
||||
| `model` | `str` | The name or path of the Llama3 model. |
|
||||
| `temperature` | `float`| The temperature parameter for generating responses. |
|
||||
| `max_tokens` | `int` | The maximum number of tokens in the generated response. |
|
||||
| `system_prompt`| `str` | The system prompt for generating responses. |
|
||||
|
||||
## Method: run
|
||||
|
||||
### Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|--------|-----------------------------------|
|
||||
| `task` | `str` | The user's task or input. |
|
||||
| `*args` | | Variable length argument list. |
|
||||
| `**kwargs`| | Arbitrary keyword arguments. |
|
||||
|
||||
### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|--------------------------------------------|
|
||||
| `str`| The generated response from the Llama3 model.|
|
||||
|
||||
### Usage Examples
|
||||
First, install `weather-swarm` with:
|
||||
|
||||
`$ pip install -U weather-swarm`
|
||||
|
||||
|
||||
#### Example 1: Basic Usage
|
||||
|
||||
```python
|
||||
from swarms import llama3Hosted
|
||||
|
||||
llama = llama3Hosted()
|
||||
response = llama.run("Tell me a joke.")
|
||||
print(response)
|
||||
```
|
||||
|
||||
#### Example 2: Custom Model and Parameters
|
||||
|
||||
```python
|
||||
from swarms import llama3Hosted
|
||||
|
||||
|
||||
llama = llama3Hosted(
|
||||
model="custom-llama-model",
|
||||
temperature=0.5,
|
||||
max_tokens=2000,
|
||||
system_prompt="You are a witty assistant."
|
||||
)
|
||||
response = llama.run("What's the weather like today?")
|
||||
print(response)
|
||||
```
|
||||
|
||||
#### Example 3: Using Additional Arguments
|
||||
|
||||
```python
|
||||
from swarms import llama3Hosted
|
||||
|
||||
llama = llama3Hosted()
|
||||
response = llama.run("Write a short story.", custom_stop_tokens=[128002, 128003])
|
||||
print(response)
|
||||
```
|
||||
|
||||
## Additional Information and Tips
|
||||
|
||||
- **Temperature Parameter**: The temperature parameter controls the randomness of the model's output. Lower values (close to 0) make the output more deterministic, while higher values (up to 1) make it more random.
|
||||
- **System Prompt**: Crafting an effective system prompt can significantly impact the quality and relevance of the model's responses. Ensure the prompt aligns well with the intended use case.
|
||||
- **Error Handling**: Always include error handling when making API requests to ensure your application can gracefully handle any issues that arise.
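
As a quick illustration of the temperature guidance above, this minimal sketch runs the same task at a low and a high temperature; the exact outputs will vary between runs.

```python
from swarms import llama3Hosted

# Low temperature: more deterministic, repeatable phrasing.
precise = llama3Hosted(temperature=0.1)

# High temperature: more varied, creative phrasing.
creative = llama3Hosted(temperature=0.9)

task = "Describe a thunderstorm in one sentence."
print(precise.run(task))
print(creative.run(task))
```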
|
||||
|
||||
## References and Resources
|
||||
|
||||
- [Llama3 Model Documentation](https://github.com/facebookresearch/llama)
|
||||
- [Requests Library Documentation](https://docs.python-requests.org/en/latest/)
|
||||
- [JSON Library Documentation](https://docs.python.org/3/library/json.html)
|
||||
|
||||
This documentation provides a comprehensive overview of the `llama3Hosted` class, its parameters, attributes, methods, and usage examples. By following this guide, developers can effectively integrate and utilize the Llama3 model in their applications.
|
@ -0,0 +1,113 @@
|
||||
## Weather Agent API Documentation
|
||||
|
||||
### Overview
|
||||
The Weather Agent API provides endpoints to interact with a weather prediction model, "WeatherMan Agent". This API allows users to get weather-related information through chat completions using the OpenAI GPT model with specific prompts and tools.
|
||||
|
||||
### Base URL
|
||||
```
|
||||
http://localhost:8000
|
||||
```
|
||||
|
||||
### Endpoints
|
||||
|
||||
#### Health Check
|
||||
|
||||
##### `GET /v1/health`
|
||||
Checks the health status of the API.
|
||||
|
||||
**Response:**
|
||||
- `200 OK`: Returns a JSON object indicating the status of the API.
|
||||
```json
|
||||
{
|
||||
"status": "ok"
|
||||
}
|
||||
```
|
||||
|
||||
#### Get Models
|
||||
|
||||
##### `GET /v1/models`
|
||||
Retrieves the list of available models.
|
||||
|
||||
**Response:**
|
||||
- `200 OK`: Returns a JSON object with the list of models.
|
||||
```json
|
||||
{
|
||||
"models": ["WeatherMan Agent"]
|
||||
}
|
||||
```
|
||||
|
||||
#### Chat Completions
|
||||
|
||||
##### `POST /v1/chat/completions`
|
||||
Generates weather-related responses based on the provided prompt using the "WeatherMan Agent" model.
|
||||
|
||||
**Request Body:**
|
||||
- `model` (string): The name of the model to use. Must be "WeatherMan Agent".
|
||||
- `prompt` (string): The input prompt for the chat completion.
|
||||
- `max_tokens` (integer, optional): The maximum number of tokens to generate. Default is 100.
|
||||
- `temperature` (float, optional): The sampling temperature for the model. Default is 1.0.
|
||||
|
||||
**Example Request:**
|
||||
```json
|
||||
{
|
||||
"model": "WeatherMan Agent",
|
||||
"prompt": "What will the weather be like tomorrow in New York?",
|
||||
"max_tokens": 100,
|
||||
"temperature": 1.0
|
||||
}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
- `200 OK`: Returns a JSON object with the completion result.
|
||||
```json
|
||||
{
|
||||
"id": "unique-id",
|
||||
"object": "text_completion",
|
||||
"created": 1234567890,
|
||||
"model": "WeatherMan Agent",
|
||||
"choices": [
|
||||
{
|
||||
"text": "The weather tomorrow in New York will be..."
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": 10,
|
||||
"completion_tokens": 15,
|
||||
"total_tokens": 25
|
||||
}
|
||||
}
|
||||
```
|
||||
- `400 Bad Request`: If the model specified is not "WeatherMan Agent".
|
||||
```json
|
||||
{
|
||||
"detail": "Model not found"
|
||||
}
|
||||
```
|
||||
- `500 Internal Server Error`: If there is an error processing the request.
|
||||
```json
|
||||
{
|
||||
"detail": "Error message"
|
||||
}
|
||||
```
|
||||
|
||||
### Models
|
||||
The API supports the following model:
|
||||
- **WeatherMan Agent**: A specialized agent for providing weather-related information based on the prompt.
|
||||
|
||||
### Usage
|
||||
|
||||
1. **Health Check:** Verify that the API is running by sending a GET request to `/v1/health`.
|
||||
2. **Get Models:** Retrieve the list of available models by sending a GET request to `/v1/models`.
|
||||
3. **Chat Completions:** Generate a weather-related response by sending a POST request to `/v1/chat/completions` with the required parameters (see the example below).
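
A minimal Python client sketch that ties these steps together (the base URL assumes the local deployment described above):

```python
import requests

BASE_URL = "http://localhost:8000"

# 1. Health check
print(requests.get(f"{BASE_URL}/v1/health").json())

# 2. List the available models
print(requests.get(f"{BASE_URL}/v1/models").json())

# 3. Request a chat completion from the WeatherMan Agent
payload = {
    "model": "WeatherMan Agent",
    "prompt": "What will the weather be like tomorrow in New York?",
    "max_tokens": 100,
    "temperature": 1.0,
}
response = requests.post(f"{BASE_URL}/v1/chat/completions", json=payload)
print(response.json()["choices"][0]["text"])
```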
|
||||
|
||||
### Error Handling
|
||||
The API returns appropriate HTTP status codes and error messages for different error scenarios:
|
||||
- `400 Bad Request` for invalid requests.
|
||||
- `500 Internal Server Error` for unexpected errors during processing.
|
||||
|
||||
### CORS Configuration
|
||||
The API allows cross-origin requests from any origin, supporting all methods and headers.
|
||||
|
||||
---
|
||||
|
||||
For further assistance or issues, please contact the API support team.
|
@ -0,0 +1,30 @@
|
||||
from weather_swarm.tools.tools import request_metar_nearest
|
||||
from swarms import tool
|
||||
|
||||
|
||||
@tool(
|
||||
name="RequestMetarNearest",
|
||||
description=(
|
||||
"Requests the nearest METAR (Meteorological Aerodrome Report)"
|
||||
" data based on the given latitude and longitude."
|
||||
),
|
||||
return_string=False,
|
||||
return_dict=False,
|
||||
)
|
||||
def request_metar_nearest_new(lat: float, lon: float):
|
||||
"""
|
||||
Requests the nearest METAR (Meteorological Aerodrome Report) data based on the given latitude and longitude.
|
||||
|
||||
Args:
|
||||
lat (float): The latitude of the location.
|
||||
lon (float): The longitude of the location.
|
||||
|
||||
Returns:
|
||||
The METAR data for the nearest location.
|
||||
"""
|
||||
return request_metar_nearest(lat, lon)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Demo: fetch the METAR report nearest to San Francisco.
    out = request_metar_nearest_new(37.7749, -122.4194)
    print(out)
    print(type(out))
|
@ -0,0 +1,19 @@
|
||||
from swarms import llama3Hosted
|
||||
|
||||
|
||||
# Example usage
|
||||
llama3 = llama3Hosted(
|
||||
model="meta-llama/Meta-Llama-3-8B-Instruct",
|
||||
temperature=0.8,
|
||||
max_tokens=1000,
|
||||
system_prompt=(
|
||||
"You're a weather agent for Baron Weather, you specialize in"
|
||||
" weather analysis"
|
||||
),
|
||||
)
|
||||
|
||||
completion_generator = llama3.run(
|
||||
"What are the best weather conditions to lay concrete",
|
||||
)
|
||||
|
||||
print(completion_generator)
|
@ -0,0 +1,34 @@
|
||||
from swarms import Agent
|
||||
from swarms import llama3Hosted
|
||||
from weather_swarm.prompts import GLOSSARY_PROMPTS
|
||||
from weather_swarm.prompts import (
|
||||
FEW_SHORT_PROMPTS,
|
||||
WEATHER_AGENT_SYSTEM_PROMPT,
|
||||
)
|
||||
|
||||
|
||||
# Purpose = To generate weather information for the user and send API requests to the Baron Weather API
|
||||
agent = Agent(
|
||||
agent_name="WeatherMan Agent",
|
||||
system_prompt=WEATHER_AGENT_SYSTEM_PROMPT,
|
||||
sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS],
|
||||
# sop=list_tool_schemas_json,
|
||||
llm=llama3Hosted(
|
||||
max_tokens=2000,
|
||||
temperature=0.1,
|
||||
),
|
||||
max_loops="auto",
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
streaming_on=True,
|
||||
interactive=True,
|
||||
)
|
||||
|
||||
# Run the agent to answer a weather query
|
||||
generated_data = agent.run(
|
||||
"Based on the current humidity in Huntsville, how frizzy will my"
|
||||
" hair get?"
|
||||
)
|
||||
|
||||
# Print the generated data
|
||||
# print(f"Generated data: {generated_data}")
|
@ -0,0 +1,35 @@
|
||||
from swarms import get_openai_function_schema_from_func
|
||||
|
||||
from weather_swarm.tools.tools import (
|
||||
request_metar_nearest,
|
||||
point_query,
|
||||
request_ndfd_basic,
|
||||
# point_query_region,
|
||||
request_ndfd_hourly,
|
||||
)
|
||||
|
||||
|
||||
def get_schemas_for_funcs(funcs):
|
||||
schemas = []
|
||||
for func in funcs:
|
||||
name = str(func.__name__)
|
||||
description = str(func.__doc__)
|
||||
schema = get_openai_function_schema_from_func(
|
||||
func, name=name, description=description
|
||||
)
|
||||
schemas.append(str(schema))
|
||||
merged_schemas = "\n".join(schemas)
|
||||
return merged_schemas
|
||||
|
||||
|
||||
funcs = [
|
||||
request_metar_nearest,
|
||||
point_query,
|
||||
request_ndfd_basic,
|
||||
# point_query_region,
|
||||
request_ndfd_hourly,
|
||||
]
|
||||
|
||||
schemas = get_schemas_for_funcs(funcs)
|
||||
print(schemas)
|
||||
print(type(schemas))
|
@ -0,0 +1,55 @@
|
||||
[build-system]
|
||||
requires = ["poetry-core>=1.0.0"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
|
||||
[tool.poetry]
|
||||
name = "weather-swarm"
|
||||
version = "0.0.6"
|
||||
description = "Weather Swarm - Pytorch"
|
||||
license = "MIT"
|
||||
authors = ["Kye Gomez <kye@apac.ai>"]
|
||||
homepage = "https://github.com/baronservices/weatherman_agent"
|
||||
documentation = "https://github.com/baronservices/weatherman_agent" # Add this if you have documentation.
|
||||
readme = "README.md" # Assuming you have a README.md
|
||||
repository = "https://github.com/baronservices/weatherman_agent"
|
||||
keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering"]
|
||||
classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Intended Audience :: Developers",
|
||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Programming Language :: Python :: 3.9"
|
||||
]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.10"
|
||||
swarms = "*"
|
||||
pydantic = "2.7.1"
|
||||
|
||||
|
||||
|
||||
[tool.poetry.group.lint.dependencies]
|
||||
ruff = "^0.1.6"
|
||||
types-toml = "^0.10.8.1"
|
||||
types-redis = "^4.3.21.6"
|
||||
types-pytz = "^2023.3.0.0"
|
||||
black = "^23.1.0"
|
||||
types-chardet = "^5.0.4.6"
|
||||
mypy-protobuf = "^3.0.0"
|
||||
|
||||
|
||||
[tool.autopep8]
|
||||
max_line_length = 80
|
||||
ignore = "E501,W6" # or ["E501", "W6"]
|
||||
in-place = true
|
||||
recursive = true
|
||||
aggressive = 3
|
||||
|
||||
|
||||
[tool.ruff]
|
||||
line-length = 70
|
||||
|
||||
[tool.black]
|
||||
line-length = 70
|
||||
target-version = ['py310']
|
||||
preview = true
|
@ -0,0 +1,18 @@
|
||||
swarms
|
||||
pydantic==2.7.1
|
||||
urllib3==1.26.7
json5==0.9.6
|
||||
fastapi
|
||||
pytest
|
||||
hydra
|
||||
loguru
|
||||
requests
|
||||
opencv-python
|
||||
beartype
|
||||
termcolor
uvicorn
gunicorn
|
@ -0,0 +1,28 @@
|
||||
# Use an official Python runtime as a parent image
|
||||
FROM python:3.10-slim-buster
|
||||
|
||||
# Set environment variables
|
||||
ENV PYTHONDONTWRITEBYTECODE 1
|
||||
ENV PYTHONUNBUFFERED 1
|
||||
|
||||
# Set work directory
|
||||
WORKDIR /app
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
gcc \
|
||||
default-libmysqlclient-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Python dependencies
|
||||
COPY requirements.txt /app/
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy project
|
||||
COPY . /app/
|
||||
|
||||
# Expose port
|
||||
EXPOSE 5000
|
||||
|
||||
# Run the application:
|
||||
CMD ["gunicorn", "-w", "4", "-k", "gevent", "api:app"]
|
@ -0,0 +1,56 @@
|
||||
from unittest.mock import patch

# Import the module itself so that patching the functions at their
# definition site ("weather_swarm.tools.tools") affects the calls below.
# Patching "your_module...", the previous placeholder, would never match.
from weather_swarm.tools import tools


class TestWeatherFunctions:
    @patch("weather_swarm.tools.tools.request_metar_nearest")
    def test_request_metar_nearest(self, mock_request_metar_nearest):
        mock_request_metar_nearest.return_value = "expected_value"
        result = tools.request_metar_nearest("38", "-96")
        assert result == "expected_value"

    @patch("weather_swarm.tools.tools.point_query")
    def test_point_query_precip_totalaccum(self, mock_point_query):
        mock_point_query.return_value = "expected_value"
        result = tools.point_query(
            "precip-totalaccum-24hr", "Standard-Mercator", -86.6, 34.4
        )
        assert result == "expected_value"

    @patch("weather_swarm.tools.tools.point_query")
    def test_point_query_baron_hires_maxreflectivity(
        self, mock_point_query
    ):
        mock_point_query.return_value = "expected_value"
        result = tools.point_query(
            "baron-hires-maxreflectivity-dbz-all",
            "Mask1-Mercator",
            -86.6,
            34.4,
        )
        assert result == "expected_value"

    @patch("weather_swarm.tools.tools.point_query")
    def test_point_query_baron_hires_windspeed(
        self, mock_point_query
    ):
        mock_point_query.return_value = "expected_value"
        result = tools.point_query(
            "baron-hires-windspeed-mph-10meter",
            "Standard-Mercator",
            -86.6,
            34.4,
        )
        assert result == "expected_value"

    @patch("weather_swarm.tools.tools.request_ndfd_basic")
    def test_request_ndfd_basic(self, mock_request_ndfd_basic):
        mock_request_ndfd_basic.return_value = "expected_value"
        result = tools.request_ndfd_basic(
            34.730301, -86.586098, "forecast_time"
        )
        assert result == "expected_value"
|
@ -0,0 +1,41 @@
|
||||
from unittest.mock import Mock, patch
|
||||
from swarms import llama3Hosted
|
||||
|
||||
|
||||
class TestLlama3Hosted:
|
||||
def setup_method(self):
|
||||
self.llama = llama3Hosted()
|
||||
|
||||
def test_init(self):
|
||||
assert (
|
||||
self.llama.model == "meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
)
|
||||
assert self.llama.temperature == 0.8
|
||||
assert self.llama.max_tokens == 4000
|
||||
assert (
|
||||
self.llama.system_prompt == "You are a helpful assistant."
|
||||
)
|
||||
|
||||
@patch("requests.request")
|
||||
def test_run(self, mock_request):
|
||||
mock_response = Mock()
|
||||
expected_result = "Test response"
|
||||
mock_response.json.return_value = {
|
||||
"choices": [{"message": {"content": expected_result}}]
|
||||
}
|
||||
mock_request.return_value = mock_response
|
||||
|
||||
result = self.llama.run("Test task")
|
||||
assert result == expected_result
|
||||
mock_request.assert_called_once_with(
|
||||
"POST",
|
||||
"http://34.204.8.31:30001/v1/chat/completions",
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=(
|
||||
'{"model": "meta-llama/Meta-Llama-3-8B-Instruct",'
|
||||
' "messages": [{"role": "system", "content": "You are'
|
||||
' a helpful assistant."}, {"role": "user", "content":'
|
||||
' "Test task"}], "stop_token_ids": [128009, 128001],'
|
||||
' "temperature": 0.8, "max_tokens": 4000}'
|
||||
),
|
||||
)
|
@ -0,0 +1,161 @@
|
||||
import os
|
||||
import pytest
|
||||
from dotenv import load_dotenv
|
||||
from weather_swarm import Agent
|
||||
from weather_swarm.prompts import (
|
||||
WEATHER_AGENT_SYSTEM_PROMPT,
|
||||
GLOSSARY_PROMPTS,
|
||||
FEW_SHORT_PROMPTS,
|
||||
)
|
||||
from weather_swarm.tools.tools import (
|
||||
point_query,
|
||||
request_ndfd_basic,
|
||||
request_ndfd_hourly,
|
||||
)
|
||||
from swarms import OpenAIChat
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
# Load environment variables for tests
|
||||
load_dotenv()
|
||||
|
||||
|
||||
# Fixtures
|
||||
@pytest.fixture
|
||||
def weather_agent():
|
||||
return Agent(
|
||||
agent_name="WeatherMan Agent",
|
||||
system_prompt=WEATHER_AGENT_SYSTEM_PROMPT,
|
||||
sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS],
|
||||
llm=OpenAIChat(),
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
verbose=True,
|
||||
output_type=str,
|
||||
tools=[point_query, request_ndfd_basic, request_ndfd_hourly],
|
||||
docs_folder="datasets",
|
||||
metadata="json",
|
||||
function_calling_format_type="OpenAI",
|
||||
function_calling_type="json",
|
||||
)
|
||||
|
||||
|
||||
# Test Environment Loading
|
||||
def test_load_dotenv():
|
||||
assert (
|
||||
"API_KEY" in os.environ
|
||||
), "API_KEY not found in environment variables"
|
||||
assert (
|
||||
"API_SECRET" in os.environ
|
||||
), "API_SECRET not found in environment variables"
|
||||
|
||||
|
||||
# Test Agent Initialization
|
||||
def test_agent_initialization(weather_agent):
|
||||
assert weather_agent.agent_name == "WeatherMan Agent"
|
||||
assert weather_agent.system_prompt == WEATHER_AGENT_SYSTEM_PROMPT
|
||||
assert weather_agent.llm is not None
|
||||
assert len(weather_agent.tools) == 3
|
||||
assert weather_agent.max_loops == 1
|
||||
assert weather_agent.dynamic_temperature_enabled is True
|
||||
assert weather_agent.verbose is True
|
||||
assert weather_agent.output_type == str
|
||||
assert weather_agent.docs_folder == "datasets"
|
||||
assert weather_agent.metadata == "json"
|
||||
assert weather_agent.function_calling_format_type == "OpenAI"
|
||||
assert weather_agent.function_calling_type == "json"
|
||||
|
||||
|
||||
# Parameterized Testing for Agent Tools
|
||||
@pytest.mark.parametrize(
|
||||
"tool", [point_query, request_ndfd_basic, request_ndfd_hourly]
|
||||
)
|
||||
def test_agent_tools(weather_agent, tool):
|
||||
assert tool in weather_agent.tools
|
||||
|
||||
|
||||
# Mocking the Agent Run Method
|
||||
@patch.object(
|
||||
Agent,
|
||||
"run",
|
||||
return_value="No, there are no chances of rain today in Huntsville.",
|
||||
)
|
||||
def test_agent_run(mock_run, weather_agent):
|
||||
response = weather_agent.run(
|
||||
"Are there any chances of rain today in Huntsville?"
|
||||
)
|
||||
assert (
|
||||
response
|
||||
== "No, there are no chances of rain today in Huntsville."
|
||||
)
|
||||
mock_run.assert_called_once_with(
|
||||
"Are there any chances of rain today in Huntsville?"
|
||||
)
|
||||
|
||||
|
||||
# Testing Agent's Response Handling
|
||||
def test_agent_response_handling(weather_agent):
|
||||
weather_agent.llm = Mock()
|
||||
weather_agent.llm.return_value = "Mocked Response"
|
||||
response = weather_agent.run("What's the weather like?")
|
||||
assert response == "Mocked Response"
|
||||
|
||||
|
||||
# Test for Exception Handling in Agent Run
|
||||
def test_agent_run_exception_handling(weather_agent):
|
||||
weather_agent.llm = Mock(
|
||||
side_effect=Exception("Mocked Exception")
|
||||
)
|
||||
with pytest.raises(Exception, match="Mocked Exception"):
|
||||
weather_agent.run("Will it rain tomorrow?")
|
||||
|
||||
|
||||
# Testing Agent Initialization with Missing Parameters
|
||||
def test_agent_initialization_missing_params():
|
||||
with pytest.raises(TypeError):
|
||||
Agent(agent_name="WeatherMan Agent")
|
||||
|
||||
|
||||
# Mocking Environment Variables
|
||||
@patch.dict(
|
||||
os.environ,
|
||||
{"API_KEY": "mock_api_key", "API_SECRET": "mock_api_secret"},
|
||||
)
|
||||
def test_environment_variables():
|
||||
load_dotenv()
|
||||
assert os.getenv("API_KEY") == "mock_api_key"
|
||||
assert os.getenv("API_SECRET") == "mock_api_secret"
|
||||
|
||||
|
||||
# Testing Tools Functionality (Example: point_query)
|
||||
def test_point_query():
|
||||
response = point_query("test_latitude", "test_longitude")
|
||||
assert (
|
||||
response is not None
|
||||
) # Replace with more specific assertions based on actual function behavior
|
||||
|
||||
|
||||
# Testing Tools Functionality (Example: request_ndfd_basic)
|
||||
def test_request_ndfd_basic():
|
||||
response = request_ndfd_basic("test_latitude", "test_longitude")
|
||||
assert (
|
||||
response is not None
|
||||
) # Replace with more specific assertions based on actual function behavior
|
||||
|
||||
|
||||
# Testing Tools Functionality (Example: request_ndfd_hourly)
|
||||
def test_request_ndfd_hourly():
|
||||
response = request_ndfd_hourly("test_latitude", "test_longitude")
|
||||
assert (
|
||||
response is not None
|
||||
) # Replace with more specific assertions based on actual function behavior
|
||||
|
||||
|
||||
# Grouping and Marking Tests
|
||||
@pytest.mark.slow
|
||||
def test_slow_functionality(weather_agent):
|
||||
response = weather_agent.run("Long running query")
|
||||
assert response is not None # Example placeholder
|
||||
|
||||
|
||||
# Test Coverage Report
|
||||
# Run the following command to generate a coverage report: `pytest --cov=weather_swarm`
|
@ -0,0 +1,279 @@
|
||||
from swarms import Agent
|
||||
from swarms import llama3Hosted
|
||||
from weather_swarm.prompts import GLOSSARY_PROMPTS
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
# Define the schema for the HierarchicalSwarmRequest
|
||||
# class HierarchicalSwarmRequest(BaseModel):
|
||||
# agents: Dict[str, Any] = Field(
|
||||
# ...,
|
||||
# description=(
|
||||
# "The name of the agents and their respective tasks to be"
|
||||
# " executed hierarchically."
|
||||
# ),
|
||||
# examples={
|
||||
# "Weather Director Agent": {
|
||||
# "task": (
|
||||
# "Are there any chances of rain today in"
|
||||
# " Huntsville?"
|
||||
# )
|
||||
# }
|
||||
# },
|
||||
# )
|
||||
|
||||
|
||||
class HierarchicalSwarmRequest(BaseModel):
|
||||
task: str = Field(
|
||||
...,
|
||||
description="The user's query.",
|
||||
examples={
|
||||
"What is the current temperature at my location?": {
|
||||
"task": "What is the current temperature at my location?"
|
||||
}
|
||||
},
|
||||
)
|
||||
agent_name: str = Field(
|
||||
...,
|
||||
description="The name of the specialized agent.",
|
||||
examples={
|
||||
"Current Temperature Retrieval Agent": "Current Temperature Retrieval Agent"
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
# Define the schema for the HierarchicalSwarmResponse
|
||||
def DIRECTOR_SYSTEM_PROMPT() -> str:
|
||||
return """**Prompt:**
|
||||
As a director master agent, your task is to communicate with the user, understand their weather-related queries, and delegate the appropriate tasks to specialized worker agents. Each worker agent is specialized in retrieving a specific type of weather data. Your role involves selecting the correct agent or a list of agents, giving them the necessary tasks, and compiling their responses to provide a comprehensive answer to the user.
|
||||
|
||||
**Goal:**
|
||||
Efficiently manage and delegate tasks to specialized worker agents to gather the necessary weather data and provide a detailed, accurate response to the user.
|
||||
|
||||
**Process:**
|
||||
1. **Receive User Query:**
|
||||
- Understand the user's question or request regarding weather data.
|
||||
|
||||
2. **Identify Required Data:**
|
||||
- Determine the type(s) of weather data needed to answer the user's query.
|
||||
|
||||
3. **Select Appropriate Agents:**
|
||||
- Choose the specialized agent(s) capable of retrieving the required data.
|
||||
|
||||
4. **Delegate Tasks:**
|
||||
- Assign the relevant task to the selected agent(s) using the appropriate inputs.
|
||||
|
||||
5. **Compile Responses:**
|
||||
- Gather and compile the data returned by the worker agents into a cohesive response.
|
||||
|
||||
6. **Respond to User:**
|
||||
- Provide a detailed and accurate answer to the user based on the compiled data.
|
||||
|
||||
**Worker Agents and Their Specializations:**
|
||||
1. **Current Temperature Retrieval Agent**
|
||||
- Task: Provide the current temperature based on the user's location.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `request_metar_nearest("38", "-96")`
|
||||
|
||||
2. **Current Weather Description Agent**
|
||||
- Task: Construct a narrative weather description based on current conditions.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `request_metar_nearest("38", "-96")`
|
||||
|
||||
3. **Rainfall Accumulation Agent**
|
||||
- Task: Provide the accumulated rainfall at the user's location for the last 24 hours.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)`
|
||||
|
||||
4. **Cloud Coverage Forecast Agent**
|
||||
- Task: Provide the cloud coverage forecast for the user's location for the next day.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)`
|
||||
|
||||
5. **Precipitation Forecast Agent**
|
||||
- Task: Provide the precipitation forecast for the user's location for the next 6 hours.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)`
|
||||
|
||||
6. **Maximum Temperature Forecast Agent**
|
||||
- Task: Provide the maximum forecasted temperature for the user's location for today.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `request_ndfd_basic(34.730301, -86.586098, forecast_time)`
|
||||
|
||||
7. **Wind Speed Forecast Agent**
|
||||
- Task: Provide the maximum wind speed forecast for the user's location for today.
|
||||
- Required Inputs: User's location (latitude and longitude).
|
||||
- API Example: `point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)`
|
||||
|
||||
**Example Workflow:**
|
||||
1. **User Query:**
|
||||
- "What is the current temperature and will it rain in the next 6 hours at my location?"
|
||||
|
||||
2. **Identify Required Data:**
|
||||
- Current temperature and precipitation forecast.
|
||||
|
||||
3. **Select Appropriate Agents:**
|
||||
- Current Temperature Retrieval Agent
|
||||
- Precipitation Forecast Agent
|
||||
|
||||
4. **Delegate Tasks:**
|
||||
- Current Temperature Retrieval Agent: `request_metar_nearest("38", "-96")`
|
||||
- Precipitation Forecast Agent: `point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)`
|
||||
|
||||
5. **Compile Responses:**
|
||||
- Gather responses from both agents.
|
||||
|
||||
6. **Respond to User:**
|
||||
- "The current temperature at your location is X degrees. There is/is not expected to be precipitation in the next 6 hours."
|
||||
|
||||
By following this structured approach, you can efficiently manage user queries and provide accurate, detailed weather information.
|
||||
"""
|
||||
|
||||
|
||||
# Define the schema for the HierarchicalSwarmResponse
|
||||
def DIRECTOR_SCHEMA() -> str:
|
||||
return """
|
||||
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"task_id": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier for the task"
|
||||
},
|
||||
"user_query": {
|
||||
"type": "string",
|
||||
"description": "The query provided by the user"
|
||||
},
|
||||
"agents": {
|
||||
"type": "array",
|
||||
"description": "List of agents to handle the query",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"agent_name": {
|
||||
"type": "string",
|
||||
"description": "Name of the specialized agent"
|
||||
},
|
||||
"task": {
|
||||
"type": "string",
|
||||
"description": "Task description for the agent"
|
||||
                }
|
||||
},
|
||||
"required": ["agent_name", "task"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["task_id", "user_query", "agents"]
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def DIRECTOR_AGENT_CALLING_FEW_SHOT() -> str:
|
||||
return """
|
||||
|
||||
{
|
||||
"task_id": "1",
|
||||
"user_query": "What is the current temperature at my location?",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Current Temperature Retrieval Agent",
|
||||
"task": "Provide the current temperature based on the user's location.",
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
########## "What is the current temperature and will it rain in the next 6 hours at my location? #########
|
||||
|
||||
{
|
||||
"task_id": "2",
|
||||
"user_query": "What is the current temperature and will it rain in the next 6 hours at my location?",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Current Temperature Retrieval Agent",
|
||||
"task": "Provide the current temperature based on the user's location.",
|
||||
},
|
||||
{
|
||||
"agent_name": "Precipitation Forecast Agent",
|
||||
"task": "Provide the precipitation forecast for the user's location for the next 6 hours.",
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
########### END OF EXAMPLES ###########
|
||||
|
||||
############# Example 3: Maximum Temperature and Wind Speed Forecast #########
|
||||
{
|
||||
"task_id": "3",
|
||||
"user_query": "What is the maximum temperature and wind speed forecast for today at my location?",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Maximum Temperature Forecast Agent",
|
||||
"task": "Provide the maximum forecasted temperature for the user's location for today.",
|
||||
},
|
||||
{
|
||||
"agent_name": "Wind Speed Forecast Agent",
|
||||
"task": "Provide the maximum wind speed forecast for the user's location for today.",
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
############ End of Example 3 ############
|
||||
|
||||
############ Example 4: Rainfall Accumulation and Cloud Coverage Forecast #########
|
||||
{
|
||||
"task_id": "4",
|
||||
"user_query": "How much rain fell at my location in the last 24 hours and what is the cloud coverage forecast for tomorrow?",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Rainfall Accumulation Agent",
|
||||
"task": "Provide the accumulated rainfall at the user's location for the last 24 hours.",
|
||||
},
|
||||
{
|
||||
"agent_name": "Cloud Coverage Forecast Agent",
|
||||
"task": "Provide the cloud coverage forecast for the user's location for the next day.",
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
############ End of Example 4 ############
|
||||
|
||||
"""
|
||||
|
||||
|
||||
# [C]reate a new agent
|
||||
agent = Agent(
|
||||
agent_name="Weather Director Agent",
|
||||
system_prompt=DIRECTOR_SYSTEM_PROMPT(),
|
||||
sop_list=[
|
||||
GLOSSARY_PROMPTS,
|
||||
DIRECTOR_SCHEMA(),
|
||||
DIRECTOR_AGENT_CALLING_FEW_SHOT(),
|
||||
],
|
||||
# sop=list_tool_schemas_json,
|
||||
llm=llama3Hosted(max_tokens=1000),
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
streaming_on=True,
|
||||
# interactive=True,
|
||||
verbose=True,
|
||||
# Set the output type to the tool schema which is a BaseModel
|
||||
    output_type=str,  # or dict
|
||||
metadata_output_type="json",
|
||||
# List of schemas that the agent can handle
|
||||
function_calling_format_type="OpenAI",
|
||||
function_calling_type="json", # or soon yaml
|
||||
# return_history=True,
|
||||
)
|
||||
|
||||
# Run the director agent on a weather query
|
||||
generated_data = agent.run(
|
||||
"Are there any chances of rain today in Huntsville?"
|
||||
)
|
||||
|
||||
# Print the generated data
|
||||
print(f"Generated data: {generated_data}")
|
@ -0,0 +1,269 @@
|
||||
from swarms import Agent
|
||||
from swarms import llama3Hosted
|
||||
from pydantic import BaseModel, Field
|
||||
from weather_swarm.tools.tools import (
|
||||
request_metar_nearest,
|
||||
point_query,
|
||||
request_ndfd_basic,
|
||||
point_query_region,
|
||||
request_ndfd_hourly,
|
||||
)
|
||||
|
||||
|
||||
class WeatherRequest(BaseModel):
    """
    A class to represent a weather request.

    Attributes
    ----------
    task : str
        The user's query.
    tool : str | None
        The tool to execute, if any.
    """

    task: str = Field(..., title="The user's query")
    tool: str | None = Field(None, title="The tool to execute")
|
||||
|
||||
|
||||
def current_temperature_retrieval_agent():
|
||||
return """
|
||||
### Current Temperature Retrieval Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to provide the current temperature based on the user's location. Ensure accuracy and up-to-date information.
|
||||
|
||||
**Goal:**
|
||||
Allow the user to request the current temperature for their location.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
request_metar_nearest("38", "-96")
|
||||
"""
|
||||
|
||||
|
||||
def current_weather_description_agent():
|
||||
return """
|
||||
### Current Weather Description Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to construct a narrative weather description based on the current conditions at the user's location.
|
||||
|
||||
**Goal:**
|
||||
Have the LLM construct a narrative weather description based on current conditions.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
request_metar_nearest("38", "-96")
|
||||
"""
|
||||
|
||||
|
||||
def rainfall_accumulation_agent():
|
||||
return """
|
||||
### Rainfall Accumulation Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to provide the accumulated rainfall at the user's location for the last 24 hours.
|
||||
|
||||
**Goal:**
|
||||
Allow the user to determine how much rain has accumulated at their location in the last 24 hours.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
point_query('precip-totalaccum-24hr', 'Standard-Mercator', -86.6, 34.4)
|
||||
"""
|
||||
|
||||
|
||||
def cloud_coverage_forecast_agent():
|
||||
return """
|
||||
### Cloud Coverage Forecast Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to provide the cloud coverage forecast for the user's location for the next day.
|
||||
|
||||
**Goal:**
|
||||
Allow the user to determine cloud coverage for their location.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
request_ndfd_basic(34.730301, -86.586098, forecast_time)
|
||||
"""
|
||||
|
||||
|
||||
def precipitation_forecast_agent():
|
||||
return """
|
||||
### Precipitation Forecast Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to provide the precipitation forecast for the user's location for the next 6 hours.
|
||||
|
||||
**Goal:**
|
||||
Allow the user to determine if precipitation will fall in the coming hours.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
point_query('baron-hires-maxreflectivity-dbz-all', 'Mask1-Mercator', -86.6, 34.4)
|
||||
"""
|
||||
|
||||
|
||||
def maximum_temperature_forecast_agent():
|
||||
return """
|
||||
### Maximum Temperature Forecast Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to provide the maximum forecasted temperature for the user's location for today.
|
||||
|
||||
**Goal:**
|
||||
Allow the user to determine how hot or cold the air temperature will be.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
request_ndfd_basic(34.730301, -86.586098, forecast_time)
|
||||
"""
|
||||
|
||||
|
||||
def wind_speed_forecast_agent():
|
||||
return """
|
||||
### Wind Speed Forecast Agent
|
||||
|
||||
**Prompt:**
|
||||
As a specialized weather data agent, your task is to provide the maximum wind speed forecast for the user's location for today.
|
||||
|
||||
**Goal:**
|
||||
Allow the user to determine the maximum wind speed for that day.
|
||||
|
||||
**Required Inputs:**
|
||||
User's location (latitude and longitude).
|
||||
|
||||
**API Example:**
|
||||
point_query('baron-hires-windspeed-mph-10meter', 'Standard-Mercator', -86.6, 34.4)
|
||||
"""
|
||||
|
||||
|
||||
llm = llama3Hosted(
|
||||
max_tokens=1000,
|
||||
temperature=0.5,
|
||||
)
|
||||
|
||||
|
||||
# Define the agents with their specific prompts
temp_tracker = Agent(
    agent_name="TempTracker",
    system_prompt=current_temperature_retrieval_agent(),
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    tools=[request_metar_nearest],
)

weather_narrator = Agent(
    agent_name="WeatherNarrator",
    system_prompt=current_weather_description_agent(),
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    tools=[request_metar_nearest],
)

rain_gauge = Agent(
    agent_name="RainGauge",
    system_prompt=rainfall_accumulation_agent(),
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    tools=[point_query],
)

cloud_predictor = Agent(
    agent_name="CloudPredictor",
    system_prompt=cloud_coverage_forecast_agent(),
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    tools=[request_ndfd_basic],
)

rain_forecaster = Agent(
    agent_name="RainForecaster",
    system_prompt=precipitation_forecast_agent(),
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    tools=[point_query_region],
)

temp_forecaster = Agent(
    agent_name="TempForecaster",
    system_prompt=maximum_temperature_forecast_agent(),
    llm=llm,
    max_loops=1,
    verbose=True,
    output_type=dict,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    stopping_token="<DONE>",
    tools=[request_ndfd_hourly],
)

wind_watcher = Agent(
    agent_name="WindWatcher",
    system_prompt=wind_speed_forecast_agent(),
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    tools=[point_query_region],
)

# Collect the specialized weather agents into a single list
agents = [
    temp_tracker,
    weather_narrator,
    rain_gauge,
    cloud_predictor,
    rain_forecaster,
    temp_forecaster,
    wind_watcher,
]

# # Create a hierarchical swarm
# swarm = HiearchicalSwarm(
#     name="WeatherSwarm",
#     description="A swarm of weather agents",
#     agents=agents,
#     director =
# )
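
# Minimal usage sketch (hypothetical): until the hierarchical swarm above is
# wired up with a director agent, the specialized agents can still be
# exercised one at a time on the same request.
# task = "Will it rain today at latitude 34.4, longitude -86.6?"
# for weather_agent in agents:
#     print(weather_agent.run(task))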
@ -0,0 +1,50 @@
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat

from weather_swarm.prompts import (
    FEW_SHORT_PROMPTS,
    GLOSSARY_PROMPTS,
    WEATHER_AGENT_SYSTEM_PROMPT,
)
from weather_swarm.tools.tools import (
    point_query,
    request_ndfd_basic,
    request_ndfd_hourly,
)

# Load the environment variables
load_dotenv()


# Purpose: generate weather information for the user and send API requests
# to the Baron Weather API
agent = Agent(
    agent_name="WeatherMan Agent",
    system_prompt=WEATHER_AGENT_SYSTEM_PROMPT,
    sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS],
    # sop=list_tool_schemas_json,
    llm=OpenAIChat(),
    max_loops=1,
    # interactive=True,
    dynamic_temperature_enabled=True,
    verbose=True,
    # Set the output type to the tool schema, which is a BaseModel
    output_type=str,  # or dict, or str
    tools=[
        # request_metar_nearest,
        point_query,
        request_ndfd_basic,
        # point_query_region,
        request_ndfd_hourly,
    ],
    docs_folder="datasets",  # Add every document in the datasets folder
    metadata="json",
    function_calling_format_type="OpenAI",
    function_calling_type="json",
)

# Run the agent on a sample weather query
output = agent.run("Are there any chances of rain today in Huntsville?")

# Write the output to a new file
# with open("output.txt", "w") as f:
#     f.write(str(output))
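
# Hypothetical follow-up sketch: if output_type were switched to dict, the
# result could be persisted as JSON instead of plain text.
# import json
# with open("output.json", "w") as f:
#     json.dump(output, f, indent=2)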
@ -0,0 +1,145 @@
from pydantic import BaseModel, Field


class RequestMetarNearest(BaseModel):
    latitude: str = Field(
        ...,
        description=(
            "The latitude of the location for which the nearest METAR"
            " station is requested."
        ),
    )
    longitude: str = Field(
        ...,
        description=(
            "The longitude of the location for which the nearest"
            " METAR station is requested."
        ),
    )


class PointQueryPrecipTotalAccum24Hr(BaseModel):
    layer: str = Field(
        ...,
        description=(
            "The layer of the precipitation total accumulation in the"
            " last 24 hours."
        ),
    )
    projection: str = Field(
        ...,
        description=(
            "The projection of the location for which the"
            " precipitation total accumulation is requested."
        ),
    )
    longitude: float = Field(
        ...,
        description=(
            "The longitude of the location for which the"
            " precipitation total accumulation is requested."
        ),
    )
    latitude: float = Field(
        ...,
        description=(
            "The latitude of the location for which the precipitation"
            " total accumulation is requested."
        ),
    )


class RequestNDFDBasic(BaseModel):
    latitude: float = Field(
        ...,
        description=(
            "The latitude of the location for which the NDFD basic"
            " forecast is requested."
        ),
    )
    longitude: float = Field(
        ...,
        description=(
            "The longitude of the location for which the NDFD basic"
            " forecast is requested."
        ),
    )
    forecast_time: str = Field(
        ...,
        description=(
            "The forecast time for which the NDFD basic forecast is"
            " requested."
        ),
    )


class PointQueryBaronHiresMaxReflectivityDbzAll(BaseModel):
    layer: str = Field(
        ...,
        description=(
            "The layer of the maximum reflectivity in dBZ for all"
            " heights."
        ),
    )
    projection: str = Field(
        ...,
        description=(
            "The projection of the location for which the maximum"
            " reflectivity is requested."
        ),
    )
    longitude: float = Field(
        ...,
        description=(
            "The longitude of the location for which the maximum"
            " reflectivity is requested."
        ),
    )
    latitude: float = Field(
        ...,
        description=(
            "The latitude of the location for which the maximum"
            " reflectivity is requested."
        ),
    )


class PointQueryBaronHiresWindSpeedMph10Meter(BaseModel):
    layer: str = Field(
        ...,
        description=(
            "The layer of the wind speed in mph at 10 meters above"
            " ground level."
        ),
    )
    projection: str = Field(
        ...,
        description=(
            "The projection of the location for which the wind speed"
            " is requested."
        ),
    )
    longitude: float = Field(
        ...,
        description=(
            "The longitude of the location for which the wind speed"
            " is requested."
        ),
    )
    latitude: float = Field(
        ...,
        description=(
            "The latitude of the location for which the wind speed is"
            " requested."
        ),
    )


def _remove_a_key(d: dict, remove_key: str) -> None:
    """Remove a key from a dictionary recursively."""
    if isinstance(d, dict):
        for key in list(d.keys()):
            if key == remove_key and "type" in d.keys():
                del d[key]
            else:
                _remove_a_key(d[key], remove_key)
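

# Hypothetical usage sketch: _remove_a_key can prune noisy keys (such as
# the auto-generated "title" entries) from a pydantic v1 schema dict
# before handing it to a function-calling LLM.
# schema = RequestNDFDBasic.schema()
# _remove_a_key(schema, "title")
# print(schema)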
@ -0,0 +1,109 @@
import requests
from typing import List, Dict, Any


def fetch_geocode_by_city(
    api_key: str, city: str, timestamp: int, signature: str
) -> List[Dict[str, Any]]:
    """
    Fetch geocode data by city name.

    Args:
        api_key (str): The API key for authentication.
        city (str): The name of the city (e.g., "Austin, Tx").
        timestamp (int): The timestamp for the request.
        signature (str): The signature for the request.

    Returns:
        List[Dict[str, Any]]: Geocode data for the specified city.

    Raises:
        Exception: If the request fails or the response is invalid.
    """
    url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/city.json"
    params = {"name": city, "ts": timestamp, "sig": signature}
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        return data.get("geocode", {}).get("data", [])
    except requests.RequestException as e:
        raise Exception(f"Failed to fetch geocode data by city: {e}")
    except ValueError:
        raise Exception("Invalid response format.")


def fetch_geocode_by_address(
    api_key: str, address: str, timestamp: int, signature: str
) -> List[Dict[str, Any]]:
    """
    Fetch geocode data by address.

    Args:
        api_key (str): The API key for authentication.
        address (str): The address (e.g., "3305 Northland Dr, Austin, Tx").
        timestamp (int): The timestamp for the request.
        signature (str): The signature for the request.

    Returns:
        List[Dict[str, Any]]: Geocode data for the specified address.

    Raises:
        Exception: If the request fails or the response is invalid.
    """
    url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/address.json"
    params = {"location": address, "ts": timestamp, "sig": signature}
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        return data.get("geocode", {}).get("data", [])
    except requests.RequestException as e:
        raise Exception(f"Failed to fetch geocode data by address: {e}")
    except ValueError:
        raise Exception("Invalid response format.")


def fetch_geocode_by_zip(
    api_key: str,
    zip_code: str,
    us: int,
    timestamp: int,
    signature: str,
) -> List[Dict[str, Any]]:
    """
    Fetch geocode data by zip code.

    Args:
        api_key (str): The API key for authentication.
        zip_code (str): The zip code (e.g., "13060").
        us (int): Indicator for US zip code (1 for US, 0 for other).
        timestamp (int): The timestamp for the request.
        signature (str): The signature for the request.

    Returns:
        List[Dict[str, Any]]: Geocode data for the specified zip code.

    Raises:
        Exception: If the request fails or the response is invalid.
    """
    url = f"https://api.velocityweather.com/v1/{api_key}/reports/geocode/zip.json"
    params = {
        "zip": zip_code,
        "us": us,
        "ts": timestamp,
        "sig": signature,
    }
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()
        data = response.json()
        return data.get("geocode", {}).get("data", [])
    except requests.RequestException as e:
        raise Exception(f"Failed to fetch geocode data by zip code: {e}")
    except ValueError:
        raise Exception("Invalid response format.")
@ -0,0 +1,24 @@
from typing import List


def concat_strings(string_list: List[str]) -> str:
    """
    Concatenates a list of strings into a single string.

    Args:
        string_list (List[str]): A list of strings to be concatenated.

    Returns:
        str: The concatenated string.

    Raises:
        TypeError: If the input is not a list of strings.
    """
    if not isinstance(string_list, list):
        raise TypeError("Input must be a list of strings.")

    try:
        return "".join(string_list)
    except TypeError:
        raise TypeError("All elements in the list must be strings.")
@ -0,0 +1,159 @@
from typing import Any, List

from pydantic import BaseModel

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
from swarms.utils.loguru_logger import logger


class AgentRun(BaseModel):
    agent_name: str
    output: Any


class Metadata(BaseModel):
    layers: int
    agent_runs: List[AgentRun]
    final_output: Any


class MixtureOfAgents(BaseSwarm):
    """
    Represents a mixture of agents in a swarm.
    The agents run in parallel within each layer, each layer's outputs feed
    the next layer sequentially, and a final output agent aggregates the result.
    From the paper: https://arxiv.org/pdf/2406.04692

    Attributes:
        agents (List[Agent]): The list of agents in the swarm.
        max_loops (int): The maximum number of loops to run.
        verbose (bool): Flag indicating whether to print verbose output.
        layers (int, optional): The number of layers in the swarm. Defaults to None.
        rules (str, optional): The rules for the swarm. Defaults to None.
        final_agent (Agent): The agent that produces the final output.
        auto_save (bool): Flag indicating whether to save run metadata to disk.
        saved_file_name (str): The JSON file the run metadata is written to.
    """

    def __init__(
        self,
        name: str = "MixtureOfAgents",
        description: str = "A swarm of agents that run in parallel and sequentially.",
        agents: List[Agent] = None,
        max_loops: int = 1,
        verbose: bool = True,
        layers: int = None,
        rules: str = None,
        final_agent: Agent = None,
        auto_save: bool = False,
        saved_file_name: str = "moe_swarm.json",
    ):
        self.name = name
        self.description = description
        self.agents = agents
        self.max_loops = max_loops
        self.verbose = verbose
        self.layers = layers
        self.rules = rules
        self.final_agent = final_agent
        self.auto_save = auto_save
        self.saved_file_name = saved_file_name

        # Check the agents
        self.agent_check()
        self.final_agent_check()

        # Conversation
        self.conversation = Conversation(
            time_enabled=True,
            rules=rules,
        )

        # Initialize the swarm
        self.swarm_initialization()

    def agent_check(self):
        if not isinstance(self.agents, list):
            raise TypeError("Input must be a list of agents.")
        for agent in self.agents:
            if not isinstance(agent, Agent):
                raise TypeError(
                    "Input must be a list of agents."
                    " Each agent must be an instance of Agent."
                )

    def final_agent_check(self):
        if not isinstance(self.final_agent, Agent):
            raise TypeError("Final agent must be an instance of Agent.")

    def swarm_initialization(self):
        # Name, description, and logger
        logger.info(f"Initializing swarm {self.name}.")
        logger.info(f"Description: {self.description}")
        logger.info(f"Initializing swarm with {len(self.agents)} agents.")

    def run(self, task: str = None, *args, **kwargs):
        try:
            # Running the swarm
            logger.info(f"Running swarm {self.name}.")

            self.conversation.add("user", task)

            # Conversation history
            history = self.conversation.return_history_as_string()

            agent_runs = []
            layer = 0
            while layer < self.layers:
                logger.info(f"Running layer {layer} of the swarm.")
                # Run every agent in this layer on the current history
                responses = []
                for agent in self.agents:
                    out = agent.run(history, *args, **kwargs)
                    responses.append((agent.agent_name, out))
                    agent_runs.append(
                        AgentRun(agent_name=agent.agent_name, output=out)
                    )

                    # Log the agent run
                    logger.info(f"Agent {agent.agent_name} output: {out}")

                # Add all the responses to the conversation
                logger.info("Adding responses to the conversation.")
                for agent_name, response in responses:
                    self.conversation.add(agent_name, response)

                # Update the history
                history = self.conversation.return_history_as_string()

                layer += 1

                logger.info(f"Completed layer {layer} of the swarm.")

            # Run the final output agent on the entire conversation history
            logger.info(
                "Running the final output agent on the conversation history."
            )
            final_output = self.final_agent.run(history, *args, **kwargs)
            self.conversation.add(
                self.final_agent.agent_name, final_output
            )

            # Create metadata
            logger.info("Creating metadata for the swarm.")
            metadata = Metadata(
                layers=self.layers,
                agent_runs=agent_runs,
                final_output=final_output,
            )

            # Save metadata to a JSON file, honoring the auto_save flag
            if self.auto_save:
                logger.info("Saving metadata to JSON file.")
                with open(self.saved_file_name, "w") as f:
                    f.write(metadata.json())

            return self.conversation.return_history_as_string()
        except Exception as e:
            logger.error(
                f"Error running swarm: {e}. Try optimizing the swarm inputs or re-iterate on the task."
            )
            return None
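

# Hypothetical usage sketch: two worker agents feeding a final aggregator.
# The agent names, prompts, and the OpenAIChat LLM here are illustrative
# assumptions, not part of this module.
# from swarms import OpenAIChat
#
# llm = OpenAIChat()
# drafter = Agent(agent_name="Drafter", system_prompt="Draft an answer.", llm=llm, max_loops=1)
# reviewer = Agent(agent_name="Reviewer", system_prompt="Review the draft.", llm=llm, max_loops=1)
# aggregator = Agent(agent_name="Aggregator", system_prompt="Merge all drafts.", llm=llm, max_loops=1)
#
# moe = MixtureOfAgents(
#     agents=[drafter, reviewer],
#     final_agent=aggregator,
#     layers=2,  # run() iterates while layer < layers, so layers must be set
# )
# print(moe.run("Summarize today's weather report."))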