pull/571/merge
Kye Gomez 5 months ago
parent 03789cce1e
commit 92fc156ca4

@ -109,7 +109,7 @@ agent = Agent(
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=True,
return_step_meta=False
)

@ -0,0 +1,240 @@
import logging
from typing import Any, Dict, Optional
import requests
from pydantic import BaseModel, Field
from swarms import OpenAIFunctionCaller, Conversation
from loguru import logger
import os
class APITaskSchema(BaseModel):
    """Structured description of a single HTTP request planned by the LLM."""

    plan: str = Field(
        ...,
        description="Plan out the API request to be executed, contemplate the endpoint, method, headers, body, and params.",
    )
    url: str = Field(
        ..., description="The API endpoint to send the request to."
    )
    method: str = Field(
        ...,
        description="HTTP method to use for the request (e.g., GET, POST).",
    )
    # BUG FIX: these three fields are described as optional, but the original
    # declared them with Field(...) which makes them *required* (an explicit
    # null still had to be supplied). Defaulting to None makes them truly
    # optional while remaining backward-compatible for callers that pass them.
    headers: Optional[Dict[str, str]] = Field(
        None, description="Optional headers to include in the request."
    )
    body: Optional[Dict[str, Any]] = Field(
        None, description="Optional body content for POST requests."
    )
    params: Optional[Dict[str, Any]] = Field(
        None, description="Optional query parameters for the request."
    )
class APIRequestAgent:
    """
    An agent that sends API requests based on user input.

    Args:
        name (str, optional): The name of the agent. Defaults to "APIRequestAgent".
        description (str, optional): The description of the agent. Defaults to "An agent that sends API requests based on user input.".
        schema (BaseModel, optional): The schema for the API task. Defaults to APITaskSchema.
        temperature (int, optional): The temperature for the language model. Defaults to 0.5.
        system_prompt (str, optional): The system prompt for the language model. Defaults to "You are an API request manager. Create and execute requests based on the user's needs.".
        max_tokens (int, optional): The maximum number of tokens for the language model. Defaults to 4000.
        full_agent_history (str, optional): The full agent history. Defaults to None.
        max_loops (int, optional): The maximum number of loops for the agent. Defaults to 10.
        timeout (float, optional): Per-request timeout in seconds. Defaults to
            None, which preserves the original no-timeout behavior.

    Attributes:
        name (str): The name of the agent.
        description (str): The description of the agent.
        schema (BaseModel): The schema for the API task.
        session (requests.Session): The session for connection pooling.
        system_prompt (str): The system prompt for the language model.
        max_tokens (int): The maximum number of tokens for the language model.
        full_agent_history (str): The full agent history.
        max_loops (int): The maximum number of loops for the agent.
        llm (OpenAIFunctionCaller): The function caller for the language model.
        conversation (Conversation): The conversation object.
    """

    def __init__(
        self,
        name: str = "APIRequestAgent",
        description: str = "An agent that sends API requests based on user input.",
        schema: BaseModel = APITaskSchema,
        temperature: int = 0.5,
        system_prompt: str = "You are an API request manager. Create and execute requests based on the user's needs.",
        max_tokens: int = 4000,
        full_agent_history: str = None,
        max_loops: int = 10,
        timeout: Optional[float] = None,
        *args,
        **kwargs,
    ):
        self.name = name
        self.description = description
        self.schema = schema
        # A session enables connection pooling across repeated requests.
        self.session = requests.Session()
        self.system_prompt = system_prompt
        self.max_tokens = max_tokens
        self.max_loops = max_loops
        self.timeout = timeout

        # BUG FIX: pass the injected ``schema`` instead of the hard-coded
        # APITaskSchema so the constructor argument actually takes effect.
        self.llm = OpenAIFunctionCaller(
            system_prompt=system_prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            base_model=schema,
            parallel_tool_calls=False,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
        )

        # Conversation log shared by the LLM output and API responses.
        self.conversation = Conversation(
            time_enabled=True,
            system_prompt=system_prompt,
        )

        # The original assigned the ``full_agent_history`` argument and then
        # immediately overwrote it with the conversation history; keep the
        # net behavior (the conversation history wins).
        self.full_agent_history = (
            self.conversation.return_history_as_string()
        )

    def parse_response(
        self, response: requests.Response
    ) -> Dict[str, Any]:
        """
        Parses the API response and returns the content.

        Args:
            response (requests.Response): The API response to parse.

        Returns:
            Dict[str, Any]: The parsed response content.

        Raises:
            requests.exceptions.HTTPError: If the status code indicates an error.
            ValueError: If the body cannot be decoded as JSON.
        """
        try:
            logger.info(f"Response status code: {response.status_code}")
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            # Consistently use loguru's logger (the original mixed in the
            # unconfigured stdlib ``logging`` module here).
            logger.error(f"HTTPError: {e}")
            raise
        except ValueError as e:
            logger.error(f"Failed to parse JSON: {e}")
            raise

    def execute_request(self, task: APITaskSchema) -> Dict[str, Any]:
        """
        Executes the API request based on the given task schema.

        Args:
            task (APITaskSchema): The task schema containing request details.

        Returns:
            Dict[str, Any]: The API response.

        Raises:
            ValueError: If ``task.method`` is not a supported HTTP method.
            requests.exceptions.RequestException: On network-level failures.
        """
        # BUG FIX: APITaskSchema has no ``endpoint`` field, so the original
        # ``f"{task.url}/{task.endpoint}"`` raised AttributeError on every
        # call. ``task.url`` is documented as the full endpoint; use it.
        url = task.url
        method = task.method.upper()
        logger.info(f"Executing request: {method} {url}")

        if method not in ("GET", "POST", "PUT", "DELETE", "PATCH"):
            # Raised outside the except clause below, matching the original
            # (ValueError was never caught by the RequestException handler).
            raise ValueError(f"Unsupported HTTP method: {method}")

        request_kwargs: Dict[str, Any] = {
            "headers": task.headers,
            "params": task.params,
            "timeout": self.timeout,
        }
        # Only body-carrying methods send JSON, matching the original dispatch.
        if method in ("POST", "PUT", "PATCH"):
            request_kwargs["json"] = task.body

        try:
            response = self.session.request(method, url, **request_kwargs)
            logger.info(f"Executed {method} request to {url}")
            return self.parse_response(response)
        except requests.exceptions.RequestException as e:
            logger.error(f"RequestException: {e}")
            raise

    def execute_api_request(self, task: APITaskSchema) -> str:
        """
        Executes a single step: sends the request and processes the response.

        Args:
            task (APITaskSchema): The task schema containing request details.

        Returns:
            str: The processed response from the API, rendered as a string.
                 (The original annotated Dict[str, Any] but always returned str.)
        """
        logger.info(f"Executing API request based on task: {task}")
        response = self.execute_request(task)
        response = str(response)

        # Record the API's answer in the conversation log.
        self.conversation.add(role="API", content=response)
        return response

    def run(self, task: str) -> Any:
        """
        Runs the agent by processing a task string, and executing the requests.

        Args:
            task (str): The task to be processed by the LLM and executed by the agent.

        Returns:
            Any: The result of the task processed by the LLM.
        """
        logger.info(f"Running agent with task: {task}")
        output = self.llm.run(task)

        # The original used bare print() debugging here; log instead.
        logger.debug(f"LLM output ({type(output)}): {output}")
        self.conversation.add(role=self.name, content=output)

        # Convert the dict returned by the function caller into the schema.
        output = APITaskSchema(**output)
        logger.info(f"Executing request based on task: {output}")
        return self.execute_api_request(output)
# Example usage: build the agent and execute a natural-language request.
# NOTE(review): this runs at import time and requires OPENAI_API_KEY to be set.
agent = APIRequestAgent(
    name="APIRequestAgent",
    description="An agent that sends API requests based on user input.",
    schema=APITaskSchema,
    system_prompt="You are an API request manager. Create and execute requests based on the user's needs.",
)
agent.run("Send an API request to an open source API")
# Print the conversation history captured at construction time.
print(agent.full_agent_history)

@ -227,6 +227,8 @@ nav:
- Add Agents: "swarms_platform/agents/agents_api.md"
- Query Agents: "swarms_platform/agents/fetch_agents.md"
- Edit Agents: "swarms_platform/agents/edit_agent.md"
- Telemetry API:
- POST: "swarms_platform/telemetry/index.md"
# - Tools API:
# - Overview: "swarms_platform/tools_api.md"
# - Add Tools: "swarms_platform/fetch_tools.md"

@ -0,0 +1,196 @@
# Swarms Telemetry API Documentation
This documentation covers the API for handling telemetry data. The API is implemented using Next.js, Supabase for data storage, and Zod for request validation. The handler processes incoming telemetry data, validates it, and stores it in a Supabase database. The handler also includes robust error handling and retries for database insertions to ensure data reliability.
## Endpoint
- **URL:** `/api/telemetry`
- **Method:** `POST`
- **Content-Type:** `application/json`
- **Description:** Receives telemetry data and stores it in the Supabase database.
## Request Schema
The API expects a JSON object in the request body that matches the following schema, validated using Zod:
| Field Name | Type | Required | Description |
|---------------------|----------|----------|-----------------------------------------------------------|
| `data` | `any` | No | Telemetry data payload. |
| `swarms_api_key` | `string` | No | API key associated with the swarms framework. |
| `status` | `string` | No | Status of the telemetry data. Default is `'received'`. |
| `processing_time` | `string` | No | Time taken to process the telemetry data. |
## Response
### Success Response
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
- **Body:**
```json
{
"message": "Telemetry data received and stored successfully"
}
```
### Error Responses
- **Status Code:** `400 Bad Request`
- **Content-Type:** `application/json`
- **Body:**
```json
{
"error": "Invalid data format",
"details": [
// Zod validation error details
]
}
```
- **Status Code:** `405 Method Not Allowed`
- **Content-Type:** `application/json`
- **Body:**
```json
{
"error": "Method Not Allowed"
}
```
- **Status Code:** `500 Internal Server Error`
- **Content-Type:** `application/json`
- **Body:**
```json
{
"error": "Internal Server Error",
"details": "Error message"
}
```
## Example Usage
### Python (Using `requests` Library)
```python
import requests
url = "https://swarms.world/api/telemetry"
headers = {
"Content-Type": "application/json"
}
data = {
"data": {"example_key": "example_value"},
"swarms_api_key": "your_swarms_api_key",
"status": "received",
"processing_time": "123ms"
}
response = requests.post(url, json=data, headers=headers)
print(response.status_code)
print(response.json())
```
### Node.js (Using `axios` Library)
```javascript
const axios = require('axios');
const url = 'https://swarms.world/api/telemetry';
const data = {
data: { example_key: 'example_value' },
swarms_api_key: 'your_swarms_api_key',
status: 'received',
processing_time: '123ms'
};
axios.post(url, data)
.then(response => {
console.log(response.status);
console.log(response.data);
})
.catch(error => {
console.error(error.response.status);
console.error(error.response.data);
});
```
### Go (Using `net/http` and `encoding/json`)
```go
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
)
func main() {
url := "https://swarms.world/api/telemetry"
data := map[string]interface{}{
"data": map[string]interface{}{"example_key": "example_value"},
"swarms_api_key": "your_swarms_api_key",
"status": "received",
"processing_time": "123ms",
}
jsonData, err := json.Marshal(data)
if err != nil {
fmt.Println("Error marshaling JSON:", err)
return
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
fmt.Println("Error creating request:", err)
return
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error making request:", err)
return
}
defer resp.Body.Close()
fmt.Println("Response status:", resp.Status)
}
```
### cURL Command
```bash
curl -X POST https://swarms.world/api/telemetry \
-H "Content-Type: application/json" \
-d '{
"data": {"example_key": "example_value"},
"swarms_api_key": "your_swarms_api_key",
"status": "received",
"processing_time": "123ms"
}'
```
### Supabase Table Structure
The Supabase table (presumably `swarms_framework_schema`) should have the following columns:
- **`data`**: JSONB or TEXT - Stores the telemetry data payload.
- **`swarms_api_key`**: TEXT - Stores the API key associated with the data.
- **`source_ip`**: TEXT - Stores the IP address of the request source.
- **`status`**: TEXT - Stores the status of the data processing.
- **`processing_time`**: TEXT - Stores the time taken to process the telemetry data.
## References and Further Reading
- [Next.js API Routes Documentation](https://nextjs.org/docs/api-routes/introduction)
- [Supabase JavaScript Client](https://supabase.com/docs/reference/javascript/supabase-client)
- [Zod Schema Validation](https://zod.dev/)
- [OpenAPI Specification](https://swagger.io/specification/)
This documentation is designed to be thorough and provide all the necessary details for developers to effectively use and integrate with the telemetry API.

@ -26,7 +26,7 @@ agent = Agent(
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=True,
return_step_meta=False,
)

@ -0,0 +1,57 @@
import json
import os
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
import asyncio
from swarms.telemetry.async_log_telemetry import send_telemetry
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the financial-analysis agent; commented kwargs below document
# optional features that are deliberately disabled in this example.
agent = Agent(
    agent_name="Financial-Analysis-Agent-General-11",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    # interactive=True, # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[#Add your functions here# ],
    # stopping_token="Stop!",
    # docs_folder="docs", # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # # docs="",
    retry_attempts=3,
    # context_length=1000,
    # tool_schema = dict
    context_length=200000,
    tool_system_prompt=None,
)

# Serialize the agent's configuration to a JSON string for telemetry.
data = agent.to_dict()
data = json.dumps(data)
# Asynchronously ship the serialized agent data to the telemetry endpoint.
async def send_data():
    """Send module-level ``data`` via send_telemetry and print the API reply."""
    status, payload = await send_telemetry(data)
    print(status, payload)

# Drive the coroutine to completion on a fresh event loop.
asyncio.run(send_data())

@ -91,13 +91,6 @@ agent = Agent(
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
# tools=[#Add your functions here# ],
# stopping_token="Stop!",
# interactive=True,
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp",
# # docs=
# # docs_folder="docs",
@ -105,7 +98,7 @@ agent = Agent(
# context_length=1000,
# tool_schema = dict
context_length=200000,
return_step_meta=True,
return_step_meta=False,
tools=[fetch_transactions],
)

@ -0,0 +1,189 @@
import os
from typing import Optional
import requests
from pydantic import BaseModel, Field
class SubmitPullRequestSchema(BaseModel):
    """Schema describing a single-file edit to submit as a GitHub pull request."""

    # repo_owner / repo_name were disabled here; the repository is currently
    # selected via the defaults in submit_pull_request instead.
    # repo_owner: str = Field(
    #     "kyegomez",
    #     example="kyegomez",
    #     description="The owner of the GitHub repository.",
    # )
    # repo_name: str = Field(
    #     "swarms",
    #     example="swarms",
    #     description="The name of the GitHub repository.",
    # )
    file_path: str = Field(
        ...,
        example="README.md",
        description="The path to the file within the repository.",
    )
    new_content: str = Field(
        ...,
        description="The new content to be written to the file.",
        example="New content for the file.",
    )
    commit_message: str = Field(
        ...,
        description="The commit message for the change.",
        example="Updated README.md content",
    )
    pr_title: str = Field(
        ...,
        description="The title of the pull request.",
        example="Update README.md",
    )
    pr_body: Optional[str] = Field(
        None,
        description="The body of the pull request.",
        example="This PR improves the README.md content.",
    )

    class Config:
        # NOTE(review): ``schema_extra`` is the pydantic v1 config key;
        # pydantic v2 renamed it to ``json_schema_extra`` — confirm which
        # pydantic version this project pins.
        schema_extra = {
            "example": {
                # "repo_owner": "kyegomez",
                # "repo_name": "swarms",
                "file_path": "README.md",
                "new_content": "New content for the file.",
                "commit_message": "Updated README.md content",
                "pr_title": "Update README.md",
                "pr_body": "This PR improves the README.md content.",
            }
        }
class GetFileContentSchema(BaseModel):
    """Schema describing which file to fetch from which GitHub repository."""

    repo_owner: str = Field(
        ...,
        example="kyegomez",
        description="The owner of the GitHub repository.",
    )
    repo_name: str = Field(
        ...,
        example="swarms",
        description="The name of the GitHub repository.",
    )
    file_path: str = Field(
        ...,
        example="README.md",
        description="The path to the file within the repository.",
    )
    branch: str = Field(
        default="main",
        example="main",
        description="The branch name to fetch the file from.",
    )

    class Config:
        # NOTE(review): pydantic v1 config key; v2 uses ``json_schema_extra``.
        schema_extra = {
            "example": {
                "repo_owner": "kyegomez",
                "repo_name": "swarms",
                "file_path": "README.md",
                "branch": "main",
            }
        }
def get_github_file_content(
    file_path: str,
    repo_owner: str = "kyegomez",
    repo_name: str = "swarms",
    branch: str = "main",
    timeout: float = 10.0,
) -> str:
    """
    Fetches the content of a file from a GitHub repository.

    Args:
        file_path (str): The path to the file within the repository.
        repo_owner (str): The owner of the repository (default 'kyegomez').
        repo_name (str): The name of the repository (default 'swarms').
        branch (str): The branch name (default is 'main').
        timeout (float): Seconds to wait for the HTTP response (default 10.0).
            The original passed no timeout, so a stalled server hung forever.

    Returns:
        str: The content of the file as a string.

    Raises:
        requests.exceptions.RequestException: If there is an error with the request.
        ValueError: If the file content cannot be decoded.
    """
    url = (
        f"https://raw.githubusercontent.com/"
        f"{repo_owner}/{repo_name}/{branch}/{file_path}"
    )
    try:
        response = requests.get(url, timeout=timeout)
        # Raise on 4xx/5xx so callers see an HTTPError, not silent bad text.
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        raise
    except ValueError as e:
        print(f"Error decoding file content: {e}")
        raise
# out = get_github_file_content("README.md")
# print(out)
def submit_pull_request(
    file_path: str,
    new_content: str,
    commit_message: str,
    pr_title: str,
    pr_body: Optional[str] = None,
    repo_owner: str = "kyegomez",
    repo_name: str = "swarms",
) -> None:
    """
    Submits a pull request to a GitHub repository by modifying a specified file.

    Authentication is taken from the ``GITHUB_TOKEN`` environment variable.
    (The original docstring documented a ``token`` parameter that does not
    exist in the signature.)

    Args:
        file_path (str): The path to the file within the repository.
        new_content (str): The new content to write to the file.
        commit_message (str): The commit message for the change.
        pr_title (str): The title of the pull request.
        pr_body (Optional[str]): The body of the pull request (default is None).
        repo_owner (str): The owner of the repository (default 'kyegomez').
        repo_name (str): The name of the repository (default 'swarms').

    Raises:
        ValueError: If the GITHUB_TOKEN environment variable is not set.
        Exception: If any error occurs during the process.
    """
    try:
        # Imported lazily so PyGithub is only required when this is called.
        from github import Github

        token = os.getenv("GITHUB_TOKEN")
        if not token:
            # Fail fast with a clear message instead of an opaque API error
            # from authenticating with None.
            raise ValueError(
                "GITHUB_TOKEN environment variable is not set."
            )

        g = Github(token)
        repo = g.get_repo(f"{repo_owner}/{repo_name}")

        # Current contents of the target file (needed for its blob SHA).
        contents = repo.get_contents(file_path)
        current_branch = repo.get_branch("main")

        # Branch name derived from the file path.
        # NOTE(review): this name is not unique per run, so calling the
        # function twice for the same file fails on branch creation.
        new_branch = "modify_" + file_path.replace("/", "_").replace(
            ".", "_"
        )
        repo.create_git_ref(
            ref=f"refs/heads/{new_branch}", sha=current_branch.commit.sha
        )

        # Commit the new content on the freshly created branch.
        repo.update_file(
            contents.path,
            commit_message,
            new_content,
            contents.sha,
            branch=new_branch,
        )

        # Open the pull request against main.
        repo.create_pull(
            title=pr_title, body=pr_body, head=new_branch, base="main"
        )
        print("Pull request created successfully.")
    except Exception as e:
        print(f"Error: {e}")
        raise

@ -0,0 +1,200 @@
import os
from typing import List
from loguru import logger
from pydantic import BaseModel, Field
from swarms import OpenAIFunctionCaller, create_file_in_folder
class PromptUseCase(BaseModel):
    """A single use case (title + description) attached to a generated prompt."""

    title: str = Field(
        ...,
        description="The name of the use case.",
    )
    description: str = Field(
        ...,
        description="The description of the use case.",
    )
class PromptSchema(BaseModel):
    """Structured output the LLM must produce for one marketplace prompt."""

    name: str = Field(
        ...,
        description="The name of the prompt.",
    )
    prompt: str = Field(
        ...,
        description="The prompt to generate the response.",
    )
    description: str = Field(
        ...,
        description="The description of the prompt.",
    )
    # Tags are a single comma-separated string, not a list.
    tags: str = Field(
        ...,
        description="The tags for the prompt denoted by a comma sign: Code Gen Prompt, Pytorch Code Gen Agent Prompt, Finance Agent Prompt, ",
    )
    useCases: List[PromptUseCase] = Field(
        ...,
        description="The use cases for the prompt.",
    )
class PromptGeneratorAgent:
    """
    A class that generates prompts based on given tasks and publishes them to the marketplace.

    Args:
        system_prompt (str, optional): The system prompt to use. Defaults to None.
        max_tokens (int, optional): The maximum number of tokens in the generated prompt. Defaults to 4000.
        temperature (float, optional): The temperature value for controlling randomness in the generated prompt. Defaults to 0.5.
        schema (BaseModel, optional): The base model schema to use. Defaults to PromptSchema.

    Attributes:
        llm (OpenAIFunctionCaller): An instance of the OpenAIFunctionCaller class for making function calls to the OpenAI API.

    Methods:
        clean_model_code: Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespaces.
        upload_to_marketplace: Uploads the generated prompt data to the marketplace.
        run: Creates a prompt based on the given task and publishes it to the marketplace.
    """

    def __init__(
        self,
        system_prompt: str = None,
        max_tokens: int = 4000,
        temperature: float = 0.5,
        schema: BaseModel = PromptSchema,
    ):
        # Function caller constrained to emit data matching ``schema``.
        self.llm = OpenAIFunctionCaller(
            system_prompt=system_prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            base_model=schema,
            parallel_tool_calls=False,
        )

    def clean_model_code(self, model_code_str: str) -> str:
        """
        Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespaces.

        Args:
            model_code_str (str): The model code string to clean.

        Returns:
            str: The cleaned model code.
        """
        cleaned_code = model_code_str.replace("\\n", "\n").replace(
            "\\'", "'"
        )
        return cleaned_code.strip()

    def upload_to_marketplace(self, data: dict) -> str:
        """
        Uploads the generated prompt data to the marketplace.

        Args:
            data (dict): The prompt data to upload.

        Returns:
            str: The marketplace API response rendered as a string.
                 (The original annotated ``dict`` but always returned str.)
        """
        import json

        import requests

        url = "https://swarms.world/api/add-prompt"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {os.getenv('SWARMS_API_KEY')}",
        }
        # A timeout keeps a stalled marketplace from hanging the agent.
        response = requests.post(
            url, headers=headers, data=json.dumps(data), timeout=30
        )
        return str(response.json())

    def run(self, task: str) -> str:
        """
        Creates a prompt based on the given task and publishes it to the marketplace.

        Args:
            task (str): The task description for generating the prompt.

        Returns:
            str: The response from the marketplace API after uploading the prompt.
        """
        out = self.llm.run(task)
        name = out["name"]
        logger.info(f"Prompt generated: {out}")

        # Clean the model code and assemble the marketplace payload.
        data = {
            "name": name,
            "prompt": self.clean_model_code(out["prompt"]),
            "description": out["description"],
            "tags": out["tags"],
            "useCases": out["useCases"],
        }

        # BUG FIX: the original wrote prompt_{name}.json twice (first the raw
        # LLM output, then the cleaned payload), clobbering the first write.
        # Save the cleaned payload once.
        create_file_in_folder(
            "auto_generated_prompts",
            f"prompt_{name}.json",
            str(data),
        )
        logger.info(f"Prompt saved to file: prompt_{name}.json")

        # Now submit to the swarms API.
        logger.info("Uploading to marketplace...")
        return self.upload_to_marketplace(data)
# Example usage:
system_prompt = """
**System Prompt for Prompt Creator Agent**
---
**Role**: You are a highly skilled prompt creator agent with expertise in designing effective agents to solve complex business problems. Your primary function is to generate prompts that result in agents capable of executing business tasks with precision, efficiency, and scalability.
**Objective**: Your goal is to create prompts that follow a structured format, ensuring that the resulting agents are well-informed, reliable, and able to perform specific tasks in business environments. These tasks might include automating processes, analyzing data, generating content, or making strategic decisions.
### **Prompt Structure Guidelines**:
1. **Instructions**: Begin by clearly stating the objective of the agent. The instructions should outline what the agent is expected to accomplish, providing a high-level overview of the desired outcome. Be concise but comprehensive, ensuring the agent understands the broader context of the task.
2. **Examples**: After the instructions, provide several examples (known as "many-shot examples") to demonstrate how the agent should approach the task. Each example should include:
- **Input**: A specific scenario or task the agent might encounter.
- **Expected Output**: The correct or optimal response the agent should generate in that scenario.
Use a variety of examples that cover different potential cases the agent might face, ensuring the agent can generalize from the examples provided.
3. **Standard Operating Procedures (SOPs)**: For tasks that require detailed, step-by-step guidance, include a comprehensive SOP. This should be a long-form set of instructions that breaks down the task into manageable steps. The SOP should:
- Outline each step in a sequential manner.
- Provide specific guidelines, best practices, and considerations for each step.
- Include examples or mini-tutorials where necessary to ensure clarity.
4. **Error Handling**: Include guidance on how the agent should handle potential errors or uncertainties. This might involve instructions on when to seek additional input, how to flag issues, or how to prioritize tasks when resources are limited.
5. **Adaptability**: Ensure that the prompts encourage the agent to adapt to changing circumstances. This might include instructions on how to modify its approach based on real-time feedback, how to update its knowledge base, or how to learn from previous mistakes.
"""
agent = PromptGeneratorAgent(system_prompt=system_prompt, max_tokens=4000)
response = agent.run(
"Create a prompt for an agent to analyze complicated cashflow statements and generate a summary report."
)
print(response)

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "5.6.0"
version = "5.6.2"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@ -99,7 +99,9 @@ class Gemini(BaseMultiModalModel):
self.system_prompt = system_prompt
# Configure the API key
genai.configure(api_key=gemini_api_key, transport=transport)
genai.configure(
api_key=gemini_api_key, transport=transport, *args, **kwargs
)
# Prepare the generation config
self.generation_config = GenerationConfig(

@ -368,7 +368,7 @@ class Agent:
# Name
self.name = agent_name
self.description = agent_description
# Agentic stuff
self.reply = ""
self.question = None
@ -733,8 +733,9 @@ class Agent:
all_responses.append(response)
# Log the step
out_step = self.log_step_metadata(response)
steps_pool.append(out_step)
if self.return_step_meta is True:
out_step = self.log_step_metadata(response)
steps_pool.append(out_step)
# TODO: Implement reliability check
if self.tools is not None:
@ -2013,9 +2014,8 @@ class Agent:
return (
f"Model saved to {self.workspace_dir}/{self.agent_name}.yaml"
)
# def publish_agent_to_marketplace(self):
# import requests
# import requests
# # Prepare the data

@ -0,0 +1,64 @@
import json
import os
from loguru import logger
import aiohttp
async def send_telemetry(
    data: dict,
    swarms_api_key: str = None,
):
    """
    send_telemetry sends the data to the SWARMS API for logging.

    Args:
        data (dict): The data to be logged.
        swarms_api_key (str, optional): The SWARMS API key. Defaults to None,
            in which case it is read from the SWARMS_API_KEY env var.

    Returns:
        tuple: The response status and data from the API.

    Example:
        data = {
            "user_id": "123",
            "action": "login",
            "timestamp": "2022-01-01T00:00:00Z",
        }
        response_status, response_data = await send_telemetry(data)
    """
    url = "https://swarms.world/api/add-telemetry"
    if not swarms_api_key:
        swarms_api_key = get_swarms_api_key()

    headers = {"Content-Type": "application/json"}
    # BUG FIX: pass the dict directly via ``json=``. The original called
    # json.dumps() first and then used ``json=payload``, so aiohttp encoded
    # the string *again* and the server received a JSON string instead of a
    # JSON object.
    payload = {"data": data, "swarms_api_key": swarms_api_key}

    try:
        logger.debug(f"Sending data to {url} with payload: {payload}")
        # BUG FIX: the original created a ClientSession and never closed it,
        # leaking the connection pool; ``async with`` closes it deterministically.
        async with aiohttp.ClientSession() as session:
            async with session.post(
                url, json=payload, headers=headers
            ) as response:
                response_status = response.status
                response_data = await response.json()
                logger.info(
                    f"Received response: {response_status} - {response_data}"
                )
                return response_status, response_data
    except Exception as e:
        logger.error(f"Error during request: {str(e)}")
        raise
def get_swarms_api_key():
    """Return the SWARMS_API_KEY environment variable, or None if it is unset.

    (The original docstring claimed the user would be prompted for the key,
    but no prompt was ever implemented; the docstring now matches behavior.)
    """
    swarms_api_key = os.getenv("SWARMS_API_KEY")
    return swarms_api_key
Loading…
Cancel
Save