parent 558c2f5831
commit a3879c5322
@@ -1,291 +0,0 @@
import os
import json
import logging
from typing import Dict, Optional, Any, Callable
from dataclasses import dataclass
import requests
import time

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("api_tests.log"),
        logging.StreamHandler(),
    ],
)
logger = logging.getLogger(__name__)


# Configuration
@dataclass
class TestConfig:
    """Test configuration settings"""

    base_url: str
    timeout: int = 30
    verify_ssl: bool = True
    debug: bool = True


# Load config from environment or use defaults
config = TestConfig(
    base_url=os.getenv("API_BASE_URL", "http://0.0.0.0:8000/v1")
)


class APIClient:
    """API Client for testing"""

    def __init__(self, config: TestConfig):
        self.config = config
        self.session = requests.Session()

    def _url(self, path: str) -> str:
        """Construct full URL"""
        return f"{self.config.base_url}/{path.lstrip('/')}"

    def _log_request_details(
        self, method: str, url: str, headers: Dict, data: Any
    ):
        """Log request details for debugging"""
        logger.info("\nRequest Details:")
        logger.info(f"Method: {method}")
        logger.info(f"URL: {url}")
        logger.info(f"Headers: {json.dumps(headers, indent=2)}")
        logger.info(
            f"Data: {json.dumps(data, indent=2) if data else None}"
        )

    def _log_response_details(self, response: requests.Response):
        """Log response details for debugging"""
        logger.info("\nResponse Details:")
        logger.info(f"Status Code: {response.status_code}")
        logger.info(
            f"Headers: {json.dumps(dict(response.headers), indent=2)}"
        )
        try:
            logger.info(
                f"Body: {json.dumps(response.json(), indent=2)}"
            )
        except Exception:
            logger.info(f"Body: {response.text}")

    def _request(
        self,
        method: str,
        path: str,
        headers: Optional[Dict] = None,
        **kwargs: Any,
    ) -> requests.Response:
        """Make HTTP request with config defaults"""
        url = self._url(path)
        headers = headers or {}

        if self.config.debug:
            self._log_request_details(
                method, url, headers, kwargs.get("json")
            )

        try:
            response = self.session.request(
                method=method,
                url=url,
                headers=headers,
                timeout=self.config.timeout,
                verify=self.config.verify_ssl,
                **kwargs,
            )

            if self.config.debug:
                self._log_response_details(response)

            if response.status_code >= 400:
                logger.error(
                    f"Request failed with status {response.status_code}"
                )
                logger.error(f"Response: {response.text}")

            response.raise_for_status()
            return response

        except requests.exceptions.RequestException as e:
            logger.error(f"Request failed: {str(e)}")
            if hasattr(e, "response") and e.response is not None:
                logger.error(f"Error response: {e.response.text}")
            raise


class TestRunner:
    """Test runner with logging and reporting"""

    def __init__(self):
        self.client = APIClient(config)
        self.results = {"passed": 0, "failed": 0, "total_time": 0}
        self.api_key = None
        self.user_id = None
        self.agent_id = None

    def run_test(self, test_name: str, test_func: Callable):
        """Run a single test with timing and logging"""
        logger.info(f"\nRunning test: {test_name}")
        start_time = time.time()

        try:
            test_func()
            self.results["passed"] += 1
            logger.info(f"✅ {test_name} - PASSED")
        except Exception as e:
            self.results["failed"] += 1
            logger.error(f"❌ {test_name} - FAILED: {str(e)}")
            logger.exception(e)

        end_time = time.time()
        duration = end_time - start_time
        self.results["total_time"] += duration
        logger.info(f"Test duration: {duration:.2f}s")

    def test_user_creation(self):
        """Test user creation"""
        response = self.client._request(
            "POST", "/users", json={"username": "test_user"}
        )
        data = response.json()
        assert "user_id" in data, "No user_id in response"
        assert "api_key" in data, "No api_key in response"
        self.api_key = data["api_key"]
        self.user_id = data["user_id"]
        logger.info(f"Created user with ID: {self.user_id}")

    def test_create_api_key(self):
        """Test API key creation"""
        headers = {"api-key": self.api_key}
        response = self.client._request(
            "POST",
            f"/users/{self.user_id}/api-keys",
            headers=headers,
            json={"name": "test_key"},
        )
        data = response.json()
        assert "key" in data, "No key in response"
        logger.info("Successfully created new API key")

    def test_create_agent(self):
        """Test agent creation"""
        headers = {"api-key": self.api_key}
        agent_config = {
            "agent_name": "test_agent",
            "model_name": "gpt-4",
            "system_prompt": "You are a test agent",
            "description": "Test agent description",
            "temperature": 0.7,
            "max_loops": 1,
        }
        response = self.client._request(
            "POST", "/agent", headers=headers, json=agent_config
        )
        data = response.json()
        assert "agent_id" in data, "No agent_id in response"
        self.agent_id = data["agent_id"]
        logger.info(f"Created agent with ID: {self.agent_id}")

        # Wait a bit for agent to be ready
        time.sleep(2)

    def test_list_agents(self):
        """Test agent listing"""
        headers = {"api-key": self.api_key}
        response = self.client._request(
            "GET", "/agents", headers=headers
        )
        agents = response.json()
        assert isinstance(agents, list), "Response is not a list"
        assert len(agents) > 0, "No agents returned"
        logger.info(f"Successfully retrieved {len(agents)} agents")

    def test_agent_completion(self):
        """Test agent completion"""
        if not self.agent_id:
            logger.error("No agent_id available for completion test")
            raise ValueError("Agent ID not set")

        headers = {"api-key": self.api_key}
        completion_request = {
            "prompt": "Write 'Hello World!'",
            "agent_id": str(self.agent_id),  # Ensure UUID is converted to string
            "max_tokens": 100,
            "stream": False,
            "temperature_override": 0.7,
        }

        logger.info(
            f"Sending completion request for agent {self.agent_id}"
        )
        response = self.client._request(
            "POST",
            "/agent/completions",
            headers=headers,
            json=completion_request,
        )
        data = response.json()
        assert "response" in data, "No response in completion"
        logger.info(f"Completion response: {data.get('response')}")

    def run_all_tests(self):
        """Run all tests and generate report"""
        logger.info("\n" + "=" * 50)
        logger.info("Starting API test suite...")
        logger.info(f"Base URL: {config.base_url}")
        logger.info("=" * 50 + "\n")

        # Define test sequence
        tests = [
            ("User Creation", self.test_user_creation),
            ("API Key Creation", self.test_create_api_key),
            ("Agent Creation", self.test_create_agent),
            ("List Agents", self.test_list_agents),
            ("Agent Completion", self.test_agent_completion),
        ]

        # Run tests
        for test_name, test_func in tests:
            self.run_test(test_name, test_func)

        # Generate report
        self.print_report()

    def print_report(self):
        """Print test results report"""
        total_tests = self.results["passed"] + self.results["failed"]
        success_rate = (
            (self.results["passed"] / total_tests * 100)
            if total_tests > 0
            else 0
        )

        report = f"""
{'='*50}
API TEST RESULTS
{'='*50}
Total Tests: {total_tests}
Passed: {self.results['passed']} ✅
Failed: {self.results['failed']} ❌
Success Rate: {success_rate:.2f}%
Total Time: {self.results['total_time']:.2f}s
{'='*50}
"""
        logger.info(report)


if __name__ == "__main__":
    try:
        runner = TestRunner()
        runner.run_all_tests()
    except KeyboardInterrupt:
        logger.info("\nTest suite interrupted by user")
    except Exception as e:
        logger.error(f"Test suite failed: {str(e)}")
        logger.exception(e)
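
Note: the runner above is configured entirely through the API_BASE_URL environment variable. A minimal sketch of pointing it at another deployment before launch follows; the URL and the script filename are placeholders, not part of this commit:

import os
import subprocess

# Hypothetical staging URL; API_BASE_URL is the only setting the runner reads
# from the environment. Everything else comes from TestConfig defaults.
os.environ["API_BASE_URL"] = "https://staging.example.com/v1"

# Placeholder filename for the script above; the child process inherits the env var.
subprocess.run(["python", "api_test_runner.py"], check=False)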
@@ -1,254 +0,0 @@
import os
from typing import Dict, Optional, Any
from dataclasses import dataclass
import pytest
import requests
from uuid import UUID
from pydantic import BaseModel
from _pytest.terminal import TerminalReporter


# Configuration
@dataclass
class TestConfig:
    """Test configuration settings"""

    base_url: str
    timeout: int = 30
    verify_ssl: bool = True


# Load config from environment or use defaults
config = TestConfig(
    base_url=os.getenv("API_BASE_URL", "http://localhost:8000/v1")
)


# API Response Types
class UserResponse(BaseModel):
    user_id: str
    api_key: str


class AgentResponse(BaseModel):
    agent_id: UUID


class MetricsResponse(BaseModel):
    total_completions: int
    average_response_time: float
    error_rate: float
    last_24h_completions: int
    total_tokens_used: int
    uptime_percentage: float
    success_rate: float
    peak_tokens_per_minute: int


class APIClient:
    """API Client with typed methods"""

    def __init__(self, config: TestConfig):
        self.config = config
        self.session = requests.Session()

    def _url(self, path: str) -> str:
        """Construct full URL"""
        return f"{self.config.base_url}/{path.lstrip('/')}"

    def _request(
        self,
        method: str,
        path: str,
        headers: Optional[Dict] = None,
        **kwargs: Any,
    ) -> requests.Response:
        """Make HTTP request with config defaults"""
        url = self._url(path)
        return self.session.request(
            method=method,
            url=url,
            headers=headers,
            timeout=self.config.timeout,
            verify=self.config.verify_ssl,
            **kwargs,
        )

    def create_user(self, username: str) -> UserResponse:
        """Create a new user"""
        response = self._request(
            "POST", "/users", json={"username": username}
        )
        response.raise_for_status()
        return UserResponse(**response.json())

    def create_agent(
        self, agent_config: Dict[str, Any], api_key: str
    ) -> AgentResponse:
        """Create a new agent"""
        headers = {"api-key": api_key}
        response = self._request(
            "POST", "/agent", headers=headers, json=agent_config
        )
        response.raise_for_status()
        return AgentResponse(**response.json())

    def get_metrics(
        self, agent_id: UUID, api_key: str
    ) -> MetricsResponse:
        """Get agent metrics"""
        headers = {"api-key": api_key}
        response = self._request(
            "GET", f"/agent/{agent_id}/metrics", headers=headers
        )
        response.raise_for_status()
        return MetricsResponse(**response.json())


# Test Fixtures
@pytest.fixture
def api_client() -> APIClient:
    """Fixture for API client"""
    return APIClient(config)


@pytest.fixture
def test_user(api_client: APIClient) -> UserResponse:
    """Fixture for test user"""
    return api_client.create_user("test_user")


@pytest.fixture
def test_agent(
    api_client: APIClient, test_user: UserResponse
) -> AgentResponse:
    """Fixture for test agent"""
    agent_config = {
        "agent_name": "test_agent",
        "model_name": "gpt-4",
        "system_prompt": "You are a test agent",
        "description": "Test agent description",
    }
    return api_client.create_agent(agent_config, test_user.api_key)


# Tests
def test_user_creation(api_client: APIClient):
    """Test user creation flow"""
    response = api_client.create_user("new_test_user")
    assert response.user_id
    assert response.api_key


def test_agent_creation(
    api_client: APIClient, test_user: UserResponse
):
    """Test agent creation flow"""
    agent_config = {
        "agent_name": "test_agent",
        "model_name": "gpt-4",
        "system_prompt": "You are a test agent",
        "description": "Test agent description",
    }
    response = api_client.create_agent(
        agent_config, test_user.api_key
    )
    assert response.agent_id


def test_agent_metrics(
    api_client: APIClient,
    test_user: UserResponse,
    test_agent: AgentResponse,
):
    """Test metrics retrieval"""
    metrics = api_client.get_metrics(
        test_agent.agent_id, test_user.api_key
    )
    assert metrics.total_completions >= 0
    assert metrics.error_rate >= 0
    assert metrics.uptime_percentage >= 0


def test_invalid_auth(api_client: APIClient):
    """Test invalid authentication"""
    with pytest.raises(requests.exceptions.HTTPError) as exc_info:
        api_client.create_agent({}, "invalid_key")
    assert exc_info.value.response.status_code == 401


# Custom pytest plugin to capture test results. The hook lives on the
# plugin instance itself so that passing it via pytest.main(plugins=[...])
# actually wires it up; the counts are read from the terminal reporter's
# stats once the run finishes.
class ResultCapture:
    def __init__(self):
        self.total = 0
        self.passed = 0
        self.failed = 0
        self.errors = 0

    @pytest.hookimpl(hookwrapper=True)
    def pytest_terminal_summary(
        self, terminalreporter: TerminalReporter, exitstatus: int
    ):
        yield
        self.passed = len(terminalreporter.stats.get("passed", []))
        self.failed = len(terminalreporter.stats.get("failed", []))
        self.errors = len(terminalreporter.stats.get("error", []))
        self.total = self.passed + self.failed + self.errors


@dataclass
class TestReport:
    total_tests: int
    passed: int
    failed: int
    errors: int

    @property
    def success_rate(self) -> float:
        return (
            (self.passed / self.total_tests) * 100
            if self.total_tests > 0
            else 0
        )


def run_tests() -> TestReport:
    """Run tests and generate typed report"""
    # Create result capture
    capture = ResultCapture()

    # Create pytest configuration
    args = [__file__, "-v"]

    # Run pytest with our plugin
    pytest.main(args, plugins=[capture])

    # Generate report
    return TestReport(
        total_tests=capture.total,
        passed=capture.passed,
        failed=capture.failed,
        errors=capture.errors,
    )


if __name__ == "__main__":
    # Example usage with environment variable:
    # export API_BASE_URL=http://api.example.com/v1
    report = run_tests()
    print("\nTest Results:")
    print(f"Total Tests: {report.total_tests}")
    print(f"Passed: {report.passed}")
    print(f"Failed: {report.failed}")
    print(f"Errors: {report.errors}")
    print(f"Success Rate: {report.success_rate:.2f}%")
@@ -1,472 +0,0 @@
import asyncio
import json
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID

import httpx
from loguru import logger

# Configure logger
logger.add(
    "tests/api_test_{time}.log",
    rotation="1 day",
    retention="7 days",
    level="DEBUG",
    format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}",
)


class TestConfig:
    """Test configuration and utilities"""

    BASE_URL: str = "http://localhost:8000/v1"
    TEST_USERNAME: str = "test_user"
    api_key: Optional[str] = None
    user_id: Optional[UUID] = None
    test_agent_id: Optional[UUID] = None


class TestResult:
    """Model for test results"""

    def __init__(
        self,
        test_name: str,
        status: str,
        duration: float,
        error: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
    ):
        self.test_name = test_name
        self.status = status
        self.duration = duration
        self.error = error
        self.details = details or {}

    def dict(self):
        return {
            "test_name": self.test_name,
            "status": self.status,
            "duration": self.duration,
            "error": self.error,
            "details": self.details,
        }


async def log_response(response: httpx.Response, test_name: str) -> None:
    """Log API response details"""
    logger.debug(f"\n{test_name} Response:")
    logger.debug(f"Status Code: {response.status_code}")
    logger.debug(f"Headers: {dict(response.headers)}")
    try:
        logger.debug(f"Body: {response.json()}")
    except json.JSONDecodeError:
        logger.debug(f"Body: {response.text}")


async def create_test_user() -> TestResult:
    """Create a test user and get API key"""
    start_time = datetime.now()
    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{TestConfig.BASE_URL}/users",
                json={"username": TestConfig.TEST_USERNAME},
            )
            await log_response(response, "Create User")

            if response.status_code == 200:
                data = response.json()
                TestConfig.api_key = data["api_key"]
                TestConfig.user_id = UUID(data["user_id"])
                return TestResult(
                    test_name="create_test_user",
                    status="passed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    details={"user_id": str(TestConfig.user_id)},
                )
            else:
                return TestResult(
                    test_name="create_test_user",
                    status="failed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    error=f"Failed to create user: {response.text}",
                )
    except Exception as e:
        logger.error(f"Error in create_test_user: {str(e)}")
        return TestResult(
            test_name="create_test_user",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def create_test_agent() -> TestResult:
    """Create a test agent"""
    start_time = datetime.now()
    try:
        # Create agent config according to the AgentConfig model
        agent_config = {
            "agent_name": "test_agent",
            "model_name": "gpt-4",
            "description": "Test agent for API testing",
            "system_prompt": "You are a test agent.",
            "temperature": 0.1,
            "max_loops": 1,
            "dynamic_temperature_enabled": True,
            "user_name": TestConfig.TEST_USERNAME,
            "retry_attempts": 1,
            "context_length": 4000,
            "output_type": "string",
            "streaming_on": False,
            "tags": ["test", "api"],
            "stopping_token": "<DONE>",
            "auto_generate_prompt": False,
        }

        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{TestConfig.BASE_URL}/agent",
                json=agent_config,
                headers={"api-key": TestConfig.api_key},
            )
            await log_response(response, "Create Agent")

            if response.status_code == 200:
                data = response.json()
                TestConfig.test_agent_id = UUID(data["agent_id"])
                return TestResult(
                    test_name="create_test_agent",
                    status="passed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    details={"agent_id": str(TestConfig.test_agent_id)},
                )
            else:
                return TestResult(
                    test_name="create_test_agent",
                    status="failed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    error=f"Failed to create agent: {response.text}",
                )
    except Exception as e:
        logger.error(f"Error in create_test_agent: {str(e)}")
        return TestResult(
            test_name="create_test_agent",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def test_agent_completion() -> TestResult:
    """Test agent completion endpoint"""
    start_time = datetime.now()
    try:
        completion_request = {
            "prompt": "Hello, this is a test prompt.",
            "agent_id": str(TestConfig.test_agent_id),
            "max_tokens": 100,
            "temperature_override": 0.5,
            "stream": False,
        }

        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{TestConfig.BASE_URL}/agent/completions",
                json=completion_request,
                headers={"api-key": TestConfig.api_key},
            )
            await log_response(response, "Agent Completion")

            if response.status_code == 200:
                return TestResult(
                    test_name="test_agent_completion",
                    status="passed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    details={"response": response.json()},
                )
            else:
                return TestResult(
                    test_name="test_agent_completion",
                    status="failed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    error=f"Failed completion test: {response.text}",
                )
    except Exception as e:
        logger.error(f"Error in test_agent_completion: {str(e)}")
        return TestResult(
            test_name="test_agent_completion",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def test_agent_metrics() -> TestResult:
    """Test agent metrics endpoint"""
    start_time = datetime.now()
    try:
        if not TestConfig.test_agent_id:
            return TestResult(
                test_name="test_agent_metrics",
                status="failed",
                duration=(datetime.now() - start_time).total_seconds(),
                error="No test agent ID available",
            )

        async with httpx.AsyncClient() as client:
            response = await client.get(
                f"{TestConfig.BASE_URL}/agent/{str(TestConfig.test_agent_id)}/metrics",
                headers={"api-key": TestConfig.api_key},
            )
            await log_response(response, "Agent Metrics")

            if response.status_code == 200:
                return TestResult(
                    test_name="test_agent_metrics",
                    status="passed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    details={"metrics": response.json()},
                )
            else:
                return TestResult(
                    test_name="test_agent_metrics",
                    status="failed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    error=f"Failed metrics test: {response.text}",
                )
    except Exception as e:
        logger.error(f"Error in test_agent_metrics: {str(e)}")
        return TestResult(
            test_name="test_agent_metrics",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def test_update_agent() -> TestResult:
    """Test agent update endpoint"""
    start_time = datetime.now()
    try:
        if not TestConfig.test_agent_id:
            return TestResult(
                test_name="test_update_agent",
                status="failed",
                duration=(datetime.now() - start_time).total_seconds(),
                error="No test agent ID available",
            )

        update_data = {
            "description": "Updated test agent description",
            "tags": ["test", "updated"],
            "max_loops": 2,
        }

        async with httpx.AsyncClient() as client:
            response = await client.patch(
                f"{TestConfig.BASE_URL}/agent/{str(TestConfig.test_agent_id)}",
                json=update_data,
                headers={"api-key": TestConfig.api_key},
            )
            await log_response(response, "Update Agent")

            if response.status_code == 200:
                return TestResult(
                    test_name="test_update_agent",
                    status="passed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    details={"update_response": response.json()},
                )
            else:
                return TestResult(
                    test_name="test_update_agent",
                    status="failed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    error=f"Failed update test: {response.text}",
                )
    except Exception as e:
        logger.error(f"Error in test_update_agent: {str(e)}")
        return TestResult(
            test_name="test_update_agent",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def test_error_handling() -> TestResult:
    """Test API error handling"""
    start_time = datetime.now()
    try:
        async with httpx.AsyncClient() as client:
            # Test with an invalid API key and a nonexistent agent ID
            invalid_agent_id = "00000000-0000-0000-0000-000000000000"
            response = await client.get(
                f"{TestConfig.BASE_URL}/agent/{invalid_agent_id}/metrics",
                headers={"api-key": "invalid_key"},
            )
            await log_response(response, "Invalid API Key Test")

            if response.status_code in [401, 403]:
                return TestResult(
                    test_name="test_error_handling",
                    status="passed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    details={"error_response": response.json()},
                )
            else:
                return TestResult(
                    test_name="test_error_handling",
                    status="failed",
                    duration=(datetime.now() - start_time).total_seconds(),
                    error="Error handling test failed",
                )
    except Exception as e:
        logger.error(f"Error in test_error_handling: {str(e)}")
        return TestResult(
            test_name="test_error_handling",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def cleanup_test_resources() -> TestResult:
    """Clean up test resources"""
    start_time = datetime.now()
    try:
        if TestConfig.test_agent_id:
            async with httpx.AsyncClient() as client:
                response = await client.delete(
                    f"{TestConfig.BASE_URL}/agent/{str(TestConfig.test_agent_id)}",
                    headers={"api-key": TestConfig.api_key},
                )
                await log_response(response, "Delete Agent")

        return TestResult(
            test_name="cleanup_test_resources",
            status="passed",
            duration=(datetime.now() - start_time).total_seconds(),
            details={"cleanup": "completed"},
        )
    except Exception as e:
        logger.error(f"Error in cleanup_test_resources: {str(e)}")
        return TestResult(
            test_name="cleanup_test_resources",
            status="error",
            duration=(datetime.now() - start_time).total_seconds(),
            error=str(e),
        )


async def run_all_tests() -> List[TestResult]:
    """Run all tests in sequence"""
    logger.info("Starting API test suite")
    results = []

    # Initialize
    results.append(await create_test_user())
    if results[-1].status != "passed":
        logger.error(
            "Failed to create test user, aborting remaining tests"
        )
        return results

    # Add delay to ensure user is properly created
    await asyncio.sleep(1)

    # Core tests
    test_functions = [
        create_test_agent,
        test_agent_completion,
        test_agent_metrics,
        test_update_agent,
        test_error_handling,
    ]

    for test_func in test_functions:
        result = await test_func()
        results.append(result)
        logger.info(f"Test {result.test_name}: {result.status}")
        if result.error:
            logger.error(f"Error in {result.test_name}: {result.error}")

        # Add small delay between tests
        await asyncio.sleep(0.5)

    # Cleanup
    results.append(await cleanup_test_resources())

    # Log summary
    passed = sum(1 for r in results if r.status == "passed")
    failed = sum(1 for r in results if r.status == "failed")
    errors = sum(1 for r in results if r.status == "error")

    logger.info("\nTest Summary:")
    logger.info(f"Total Tests: {len(results)}")
    logger.info(f"Passed: {passed}")
    logger.info(f"Failed: {failed}")
    logger.info(f"Errors: {errors}")

    return results


def main():
    """Main entry point for running tests"""
    logger.info("Starting API testing suite")
    try:
        results = asyncio.run(run_all_tests())

        # Write results to JSON file
        with open("test_results.json", "w") as f:
            json.dump(
                [result.dict() for result in results],
                f,
                indent=2,
                default=str,
            )

        logger.info("Test results written to test_results.json")

    except Exception:
        logger.exception("Fatal error in test suite")


if __name__ == "__main__":
    main()
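
Note: the suite above persists its results to test_results.json with the keys defined by TestResult.dict(). A short sketch of pulling the failures back out of that file:

import json

# Minimal sketch: list failing or erroring tests from the file the suite writes.
# The filename and keys come from main() and TestResult.dict() above.
with open("test_results.json") as f:
    results = json.load(f)

for r in results:
    if r["status"] != "passed":
        print(f'{r["test_name"]}: {r["status"]} - {r["error"]}')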
@@ -1,112 +0,0 @@
import requests
import json
from time import sleep

BASE_URL = "http://0.0.0.0:8000/v1"


def make_request(method, endpoint, data=None):
    """Helper function to make requests with error handling"""
    url = f"{BASE_URL}{endpoint}"
    try:
        if method == "GET":
            response = requests.get(url)
        elif method == "POST":
            response = requests.post(url, json=data)
        elif method == "DELETE":
            response = requests.delete(url)
        else:
            raise ValueError(f"Unsupported HTTP method: {method}")

        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error making {method} request to {endpoint}: {str(e)}")
        if e.response is not None:
            print(f"Response text: {e.response.text}")
        return None


def create_agent():
    """Create a test agent"""
    data = {
        "agent_name": "test_agent",
        "model_name": "gpt-4",
        "system_prompt": "You are a helpful assistant",
        "description": "Test agent",
        "temperature": 0.7,
        "max_loops": 1,
        "tags": ["test"],
    }
    # BASE_URL already ends in /v1, so endpoints must not repeat the prefix
    return make_request("POST", "/agent", data)


def list_agents():
    """List all agents"""
    return make_request("GET", "/agents")


def test_completion(agent_id):
    """Test a completion with the agent"""
    data = {
        "prompt": "Say hello!",
        "agent_id": agent_id,
        "max_tokens": 100,
    }
    return make_request("POST", "/agent/completions", data)


def get_agent_metrics(agent_id):
    """Get metrics for an agent"""
    return make_request("GET", f"/agent/{agent_id}/metrics")


def delete_agent(agent_id):
    """Delete an agent"""
    return make_request("DELETE", f"/agent/{agent_id}")


def run_tests():
    print("Starting API tests...")

    # Create an agent
    print("\n1. Creating agent...")
    agent_response = create_agent()
    if not agent_response:
        print("Failed to create agent")
        return

    agent_id = agent_response.get("agent_id")
    print(f"Created agent with ID: {agent_id}")

    # Give the server a moment to process
    sleep(2)

    # List agents
    print("\n2. Listing agents...")
    agents = list_agents()
    if agents:
        print(f"Found {len(agents)} agents")

    # Test completion
    if agent_id:
        print("\n3. Testing completion...")
        completion = test_completion(agent_id)
        if completion:
            print(f"Completion response: {completion.get('response')}")

        print("\n4. Getting agent metrics...")
        metrics = get_agent_metrics(agent_id)
        if metrics:
            print(f"Agent metrics: {json.dumps(metrics, indent=2)}")

    # Clean up
    # print("\n5. Cleaning up - deleting agent...")
    # delete_result = delete_agent(agent_id)
    # if delete_result:
    #     print("Successfully deleted agent")


if __name__ == "__main__":
    run_tests()
@@ -0,0 +1,258 @@
import asyncio
import json
import os
import sys
from typing import Any, Dict, Optional

import aiohttp
from loguru import logger

# Configure loguru
LOG_PATH = "api_tests.log"
logger.add(
    LOG_PATH,
    format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
    rotation="1 day",
    retention="7 days",
    level="DEBUG",
)

BASE_URL = "https://dev.api.swarms.ai/v1"  # Change this to match your server URL


async def log_request_details(method: str, url: str, headers: dict, data: Any = None):
    """Log request details before sending."""
    logger.debug(f"\n{'='*50}")
    logger.debug(f"REQUEST: {method} {url}")
    logger.debug(f"HEADERS: {json.dumps(headers, indent=2)}")
    if data:
        logger.debug(f"PAYLOAD: {json.dumps(data, indent=2)}")


async def log_response_details(response: aiohttp.ClientResponse, data: Any = None):
    """Log response details after receiving."""
    logger.debug(f"\nRESPONSE Status: {response.status}")
    logger.debug(f"RESPONSE Headers: {json.dumps(dict(response.headers), indent=2)}")
    if data:
        logger.debug(f"RESPONSE Body: {json.dumps(data, indent=2)}")
    logger.debug(f"{'='*50}\n")


async def test_create_user(session: aiohttp.ClientSession) -> Dict[str, str]:
    """Test user creation endpoint."""
    url = f"{BASE_URL}/users"
    payload = {"username": "test_user"}

    logger.info("Testing user creation...")
    await log_request_details("POST", url, {}, payload)

    try:
        async with session.post(url, json=payload) as response:
            data = await response.json()
            await log_response_details(response, data)

            if response.status != 200:
                logger.error(f"Failed to create user. Status: {response.status}, Response: {data}")
                sys.exit(1)

            logger.success("✓ Created user successfully")
            return {"user_id": data["user_id"], "api_key": data["api_key"]}
    except Exception as e:
        logger.exception(f"Exception in user creation: {str(e)}")
        sys.exit(1)


async def test_create_agent(session: aiohttp.ClientSession, api_key: str) -> Optional[str]:
    """Test agent creation endpoint; returns the new agent ID, or None on failure."""
    url = f"{BASE_URL}/agent"
    config = {
        "agent_name": "test_agent",
        "system_prompt": "You are a helpful test agent",
        "model_name": "gpt-4",
        "description": "Test agent for API validation",
        "max_loops": 1,
        "temperature": 0.5,
        "tags": ["test"],
        "streaming_on": False,
        "user_name": "test_user",  # Required field
        "output_type": "string",  # Required field
    }

    headers = {"api-key": api_key}
    logger.info("Testing agent creation...")
    await log_request_details("POST", url, headers, config)

    try:
        async with session.post(url, headers=headers, json=config) as response:
            data = await response.json()
            await log_response_details(response, data)

            if response.status != 200:
                logger.error(f"Failed to create agent. Status: {response.status}, Response: {data}")
                return None

            logger.success("✓ Created agent successfully")
            return data["agent_id"]
    except Exception as e:
        logger.exception(f"Exception in agent creation: {str(e)}")
        return None


async def test_agent_update(session: aiohttp.ClientSession, agent_id: str, api_key: str) -> bool:
    """Test agent update endpoint."""
    url = f"{BASE_URL}/agent/{agent_id}"
    update_data = {
        "description": "Updated test agent",
        "system_prompt": "Updated system prompt",
        "temperature": 0.7,
        "tags": ["test", "updated"],
    }

    headers = {"api-key": api_key}
    logger.info(f"Testing agent update for agent {agent_id}...")
    await log_request_details("PATCH", url, headers, update_data)

    try:
        async with session.patch(url, headers=headers, json=update_data) as response:
            data = await response.json()
            await log_response_details(response, data)

            if response.status != 200:
                logger.error(f"Failed to update agent. Status: {response.status}, Response: {data}")
                return False

            logger.success("✓ Updated agent successfully")
            return True
    except Exception as e:
        logger.exception(f"Exception in agent update: {str(e)}")
        return False


async def test_completion(session: aiohttp.ClientSession, agent_id: str, api_key: str) -> bool:
    """Test completion endpoint."""
    url = f"{BASE_URL}/agent/completions"
    completion_request = {
        "prompt": "Hello, how are you?",
        "agent_id": agent_id,
        "max_tokens": 100,
        "stream": False,
    }

    headers = {"api-key": api_key}
    logger.info(f"Testing completion for agent {agent_id}...")
    await log_request_details("POST", url, headers, completion_request)

    try:
        async with session.post(url, headers=headers, json=completion_request) as response:
            data = await response.json()
            await log_response_details(response, data)

            if response.status != 200:
                logger.error(f"Failed to process completion. Status: {response.status}, Response: {data}")
                return False

            logger.success("✓ Processed completion successfully")
            return True
    except Exception as e:
        logger.exception(f"Exception in completion processing: {str(e)}")
        return False


async def test_get_metrics(session: aiohttp.ClientSession, agent_id: str, api_key: str) -> bool:
    """Test metrics endpoint."""
    url = f"{BASE_URL}/agent/{agent_id}/metrics"
    headers = {"api-key": api_key}

    logger.info(f"Testing metrics retrieval for agent {agent_id}...")
    await log_request_details("GET", url, headers)

    try:
        async with session.get(url, headers=headers) as response:
            data = await response.json()
            await log_response_details(response, data)

            if response.status != 200:
                logger.error(f"Failed to get metrics. Status: {response.status}, Response: {data}")
                return False

            logger.success("✓ Retrieved metrics successfully")
            return True
    except Exception as e:
        logger.exception(f"Exception in metrics retrieval: {str(e)}")
        return False


async def run_tests():
    """Run all API tests."""
    logger.info("Starting API test suite...")
    logger.info(f"Using base URL: {BASE_URL}")

    timeout = aiohttp.ClientTimeout(total=30)  # 30 second timeout
    async with aiohttp.ClientSession(timeout=timeout) as session:
        try:
            # Create test user
            user_data = await test_create_user(session)
            if not user_data:
                logger.error("User creation failed, stopping tests.")
                return

            logger.info("User created successfully, proceeding with agent tests...")
            api_key = user_data["api_key"]

            # Create test agent
            agent_id = await test_create_agent(session, api_key)
            if not agent_id:
                logger.error("Agent creation failed, stopping tests.")
                return

            logger.info("Agent created successfully, proceeding with other tests...")

            # Run remaining tests
            test_results = []

            # Test metrics retrieval
            logger.info("Testing metrics retrieval...")
            metrics_result = await test_get_metrics(session, agent_id, api_key)
            test_results.append(("Metrics", metrics_result))

            # Test agent update
            logger.info("Testing agent update...")
            update_result = await test_agent_update(session, agent_id, api_key)
            test_results.append(("Agent Update", update_result))

            # Test completion
            logger.info("Testing completion...")
            completion_result = await test_completion(session, agent_id, api_key)
            test_results.append(("Completion", completion_result))

            # Log final results
            logger.info("\nTest Results Summary:")
            all_passed = True
            for test_name, result in test_results:
                status = "PASSED" if result else "FAILED"
                logger.info(f"{test_name}: {status}")
                if not result:
                    all_passed = False

            if all_passed:
                logger.success("\n🎉 All tests completed successfully!")
            else:
                logger.error("\n❌ Some tests failed. Check the logs for details.")

            logger.info(f"\nDetailed logs available at: {os.path.abspath(LOG_PATH)}")

        except Exception as e:
            logger.exception(f"Unexpected error during test execution: {str(e)}")
            raise
        finally:
            logger.info("Test suite execution completed.")


def main():
    logger.info("=" * 50)
    logger.info("API TEST SUITE EXECUTION")
    logger.info("=" * 50)

    try:
        asyncio.run(run_tests())
    except KeyboardInterrupt:
        logger.warning("Test execution interrupted by user.")
    except Exception:
        logger.exception("Fatal error in test execution:")
    finally:
        logger.info("Test suite shutdown complete.")


if __name__ == "__main__":
    main()
@@ -0,0 +1,111 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat

if __name__ == "__main__":
    load_dotenv()
    api_key = os.getenv("GROQ_API_KEY")

    model = OpenAIChat(
        openai_api_base="https://api.groq.com/openai/v1",
        openai_api_key=api_key,
        model_name="llama-3.1-70b-versatile",
        temperature=0.1,
    )

    # General Crypto Tax Strategist
    agent1 = Agent(
        agent_name="Token-Tax-Strategist",
        system_prompt="""You are a cryptocurrency tax specialist focusing on token trading in Florida. Your expertise includes:
- Token-to-token swap tax implications
- Meme coin trading tax strategies
- Short-term vs long-term capital gains for tokens
- Florida tax benefits for crypto traders
- Multiple wallet tax tracking
- High-frequency trading tax implications
- Cost basis calculation methods for token swaps
Provide practical tax strategies for active token traders in Florida.""",
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        user_name="swarms_corp",
        output_type="string",
        streaming_on=True,
    )

    # Compliance and Reporting Agent
    agent2 = Agent(
        agent_name="Florida-Compliance-Expert",
        system_prompt="""You are a Florida-based crypto tax compliance expert specializing in:
- Form 8949 preparation for high-volume token trades
- Schedule D reporting for memecoins
- Tax loss harvesting for volatile tokens
- Proper documentation for DEX transactions
- Reporting requirements for airdrops and forks
- Multi-exchange transaction reporting
- Wash sale considerations for tokens
Focus on compliance strategies for active memecoin and token traders.""",
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        user_name="swarms_corp",
        output_type="string",
        streaming_on=True,
    )

    # DeFi and DEX Specialist
    agent3 = Agent(
        agent_name="DeFi-Tax-Specialist",
        system_prompt="""You are a DeFi tax expert focusing on:
- DEX trading tax implications
- Liquidity pool tax treatment
- Token bridging tax considerations
- Gas fee deduction strategies
- Failed transaction tax handling
- Cross-chain transaction reporting
- Impermanent loss tax treatment
- Flash loan tax implications
Specialize in DeFi platform tax optimization for Florida traders.""",
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        user_name="swarms_corp",
        output_type="string",
        streaming_on=True,
    )

    # Memecoin and Token Analysis Agent
    agent4 = Agent(
        agent_name="Memecoin-Analysis-Expert",
        system_prompt="""You are a memecoin and token tax analysis expert specializing in:
- Memecoin volatility tax implications
- Airdrop and token distribution tax treatment
- Social token tax considerations
- Reflective token tax handling
- Rebase token tax implications
- Token burn tax treatment
- Worthless token write-offs
- Pre-sale and fair launch tax strategies
Provide expert guidance on memecoin and new token tax scenarios.""",
        llm=model,
        max_loops=1,
        dynamic_temperature_enabled=True,
        user_name="swarms_corp",
        output_type="string",
        streaming_on=True,
    )

    agents = [agent1, agent2, agent3, agent4]

    chat = GroupChat(
        name="Florida Token Tax Advisory",
        description="Specialized group for memecoin and token tax analysis, compliance, and DeFi trading in Florida",
        agents=agents,
    )

    # Example query focused on memecoin trading
    history = chat.run(
        "I'm trading memecoins and tokens on various DEXs from Florida. How should I handle my taxes for multiple token swaps, failed transactions, and potential losses? I have made a lot of money and, through a Delaware C corp, used crypto to pay my team members."
    )
    print(history.model_dump_json(indent=2))
@@ -0,0 +1,119 @@
import os

from swarm_models import OpenAIChat
from swarms import Agent
from fluid_api_agent.main import fluid_api_request
from dotenv import load_dotenv


load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


def omni_api(task: str) -> str:
    """
    Omni API Function: Calls any API dynamically based on the task description.

    This function leverages the `fluid_api_request` method to process a given task
    and make the necessary API call dynamically. It is designed to be highly flexible,
    allowing users to interact with a wide variety of APIs without needing
    predefined configurations.

    Parameters:
    -----------
    task : str
        A descriptive string outlining the API call or task to be performed.
        The description should include enough detail for `fluid_api_request`
        to determine the appropriate API endpoint, request type, and payload.

    Returns:
    --------
    str
        The stringified response data from the API call. The structure of the
        underlying response will vary based on the API being accessed.

    Raises:
    -------
    ValueError
        If the task string is insufficiently descriptive or cannot be mapped
        to a valid API request.

    HTTPError
        If the API call results in an HTTP error (e.g., 404 Not Found, 500 Server Error).

    Examples:
    ---------
    1. Call a weather API to fetch the current weather for a city:

       task = "Fetch the current weather for New York City"
       response = omni_api(task)
       print(response)

    2. Retrieve stock prices for a specific company:

       task = "Get the latest stock price for Apple Inc."
       response = omni_api(task)
       print(response)

    3. Post a message to a Slack channel:

       task = "Post 'Hello, Team!' to the #general channel in Slack"
       response = omni_api(task)
       print(response)

    Notes:
    ------
    - The `fluid_api_request` function must be implemented to interpret the `task` string
      and handle API calls accordingly.
    - Security and authentication for APIs should be managed within `fluid_api_request`.
    """
    return str(fluid_api_request(task))


# Define the system prompt tailored for the API expert
API_AGENT_SYS_PROMPT = """
You are a highly specialized financial API expert.
Your expertise lies in analyzing financial data, making investment recommendations, and
interacting with APIs to retrieve, process, and present data effectively.
You use tools like 'omni_api' to fetch data dynamically, ensuring accuracy and up-to-date results.

Instructions:
1. Always query relevant APIs to gather insights for tasks.
2. When suggesting investments, ensure a diversified portfolio based on the user's budget, risk appetite, and growth potential.
3. Verify API responses and retry calls if necessary to ensure data accuracy.
"""

# Customize the agent for financial API tasks
agent = Agent(
    agent_name="API-Finance-Expert",
    agent_description="An API expert agent specialized in financial analysis and investment planning.",
    system_prompt=API_AGENT_SYS_PROMPT,
    max_loops=1,  # Single pass over the task; raise to allow iterative refinement
    llm=model,
    dynamic_temperature_enabled=True,  # Enable temperature adjustments for optimal creativity
    user_name="swarms_corp",
    retry_attempts=5,  # Retry API calls to ensure reliability
    context_length=8192,  # Context length for comprehensive analysis
    return_step_meta=False,
    output_type="str",  # Return results as a plain string
    auto_generate_prompt=False,  # Use the custom system prompt for guidance
    max_tokens=4000,
    saved_state_path="api_finance_expert.json",
    tools=[omni_api],  # Integrate the omni_api tool
)

# Run the agent with a financial task
agent.run(
    "Fetch the current price for eth",
    all_cores=True,  # Utilize all processing cores for efficiency
)
@@ -0,0 +1,34 @@
import os

from swarms import SpreadSheetSwarm

# Create the swarm
swarm = SpreadSheetSwarm(
    name="Crypto-Tax-Optimization-Swarm",
    description="A swarm of agents performing concurrent financial analysis tasks",
    max_loops=1,
    workspace_dir="./workspace",
    load_path="crypto_tax_swarm_spreadsheet.csv",
)

try:
    # Ensure workspace directory exists
    os.makedirs("./workspace", exist_ok=True)

    # Load the financial analysts from CSV
    swarm.load_from_csv()

    print(f"Loaded {len(swarm.agents)} financial analysis agents")
    print("\nStarting concurrent financial analysis tasks...")

    # Run all agents concurrently with their configured tasks
    results = swarm.run()

    print("\nAnalysis complete! Results saved to:", swarm.save_file_path)
    print("\nSwarm execution metadata:")
    print(results)

except Exception as e:
    print(f"An error occurred: {str(e)}")
@@ -0,0 +1,34 @@
import os

from swarms import SpreadSheetSwarm

# Create the swarm
swarm = SpreadSheetSwarm(
    name="Financial-Analysis-Swarm",
    description="A swarm of agents performing concurrent financial analysis tasks",
    max_loops=1,
    workspace_dir="./workspace",
    load_path="swarm.csv",
)

try:
    # Ensure workspace directory exists
    os.makedirs("./workspace", exist_ok=True)

    # Load the financial analysts from CSV
    swarm.load_from_csv()

    print(f"Loaded {len(swarm.agents)} financial analysis agents")
    print("\nStarting concurrent financial analysis tasks...")

    # Run all agents concurrently with their configured tasks
    results = swarm.run()

    print("\nAnalysis complete! Results saved to:", swarm.save_file_path)
    print("\nSwarm execution metadata:")
    print(results)

except Exception as e:
    print(f"An error occurred: {str(e)}")
@@ -0,0 +1,48 @@
{
    "run_id": "spreadsheet_swarm_run_2024-12-25T14:28:32.568788",
    "name": "Financial-Analysis-Swarm",
    "description": "A swarm of agents performing concurrent financial analysis tasks",
    "agents": [
        "MarketAnalyst",
        "RiskManager",
        "TechnicalTrader",
        "FundamentalAnalyst",
        "MacroStrategist"
    ],
    "start_time": "2024-12-25T14:28:32.568788",
    "end_time": "2024-12-25T14:28:32.568788",
    "tasks_completed": 5,
    "outputs": [
        {
            "agent_name": "MarketAnalyst",
            "task": "Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.",
            "result": "",
            "timestamp": "2024-12-25T14:28:32.568788"
        },
        {
            "agent_name": "RiskManager",
            "task": "Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks",
            "result": "",
            "timestamp": "2024-12-25T14:28:32.568788"
        },
        {
            "agent_name": "TechnicalTrader",
            "task": "Conduct technical analysis of major market indices (S&P 500",
            "result": "",
            "timestamp": "2024-12-25T14:28:32.568788"
        },
        {
            "agent_name": "FundamentalAnalyst",
            "task": "Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio",
            "result": "",
            "timestamp": "2024-12-25T14:28:32.568788"
        },
        {
            "agent_name": "MacroStrategist",
            "task": "Analyze the current macroeconomic environment",
            "result": "",
            "timestamp": "2024-12-25T14:28:32.568788"
        }
    ],
    "number_of_agents": 5
}
@ -0,0 +1,48 @@
{
    "run_id": "spreadsheet_swarm_run_2024-12-25T15:00:31.933250",
    "name": "Financial-Analysis-Swarm",
    "description": "A swarm of agents performing concurrent financial analysis tasks",
    "agents": [
        "MarketAnalyst",
        "RiskManager",
        "TechnicalTrader",
        "FundamentalAnalyst",
        "MacroStrategist"
    ],
    "start_time": "2024-12-25T15:00:31.933250",
    "end_time": "2024-12-25T15:00:31.933250",
    "tasks_completed": 5,
    "outputs": [
        {
            "agent_name": "MarketAnalyst",
            "task": "Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.",
            "result": "As of the current market conditions, the S&P 500 has been experiencing a moderate growth trend, driven by a combination of factors including a strong labor market, low interest rates, and a gradual recovery in global economic activity. Based on my analysis, the top 3 performing sectors in the S&P 500 are:\n\n1. **Information Technology (IT) Sector**: The IT sector has been the top performer in the S&P 500, with a year-to-date return of 34.6% (as of December 2023). This sector has been driven by the continued growth of cloud computing, artificial intelligence, and cybersecurity. The sector's performance is also supported by the strong earnings growth of major IT companies such as Microsoft, Apple, and Alphabet.\n\nSupporting data:\n* IT sector's price-to-earnings (P/E) ratio: 24.5x, compared to the S&P 500's P/E ratio of 20.5x\n* IT sector's year-over-year earnings growth: 15.6%, outpacing the S&P 500's earnings growth of 10.3%\n* Top-performing IT stocks: Microsoft (MSFT) +43.1%, Apple (AAPL) +36.4%, and Alphabet (GOOGL) +34.1%\n\n2. **Healthcare Sector**: The Healthcare sector has been the second-best performer in the S&P 500, with a year-to-date return of 24.1% (as of December 2023). This sector has been driven by the growing demand for healthcare services, particularly in the areas of biotechnology and pharmaceuticals. The sector's performance is also supported by the strong earnings growth of major healthcare companies such as Johnson & Johnson, UnitedHealth Group, and Pfizer.\n\nSupporting data:\n* Healthcare sector's P/E ratio: 21.3x, compared to the S&P 500's P/E ratio of 20.5x\n* Healthcare sector's year-over-year earnings growth: 12.1%, outpacing the S&P 500's earnings growth of 10.3%\n* Top-performing healthcare stocks: Johnson & Johnson (JNJ) +29.4%, UnitedHealth Group (UNH) +26.4%, and Pfizer (PFE) +24.5%\n\n3. **Consumer Discretionary Sector**: The Consumer Discretionary sector has been the third-best performer in the S&P 500, with a year-to-date return of 22.4% (as of December 2023). This sector has been driven by the strong consumer spending, particularly in the areas of e-commerce, travel, and leisure. The sector's performance is also supported by the strong earnings growth of major consumer discretionary companies such as Amazon, McDonald's, and Visa.\n\nSupporting data:\n* Consumer Discretionary sector's P/E ratio: 23.1x, compared to the S&P 500's P/E ratio of 20.5x\n* Consumer Discretionary sector's year-over-year earnings growth: 11.4%, outpacing the S&P 500's earnings growth of 10.3%\n* Top-performing consumer discretionary stocks: Amazon (AMZN) +30.4%, McDonald's (MCD) +25.1%, and Visa (V) +24.1%\n\nRationale:\nThe outperformance of these sectors can be attributed to a combination of factors, including:\n\n* Strong earnings growth: The IT, Healthcare, and Consumer Discretionary sectors have all reported strong earnings growth, driven by their respective industry trends and demand.\n* Low interest rates: The low interest rate environment has made it easier for companies to borrow and invest in growth initiatives, which has benefited the IT and Consumer Discretionary sectors in particular.\n* Global economic recovery: The gradual recovery in global economic activity has supported the performance of the Healthcare and Consumer Discretionary sectors, which are more sensitive to economic cycles.\n\nOverall, these sectors are well-positioned for continued growth, driven by their respective industry trends and demand. However, it's essential to monitor the market conditions and adjust investment strategies accordingly, as sector rotations and market trends can change rapidly.",
            "timestamp": "2024-12-25T15:00:31.933250"
        },
        {
            "agent_name": "RiskManager",
            "task": "Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks",
            "result": "**Comprehensive Risk Analysis Report**\n\n**Portfolio Overview:**\nThe diversified portfolio consists of 60% stocks, with the remaining 40% allocated to other asset classes (e.g., bonds, real estate, commodities). The stock component is further divided into various sectors and geographic regions to minimize concentration risk.\n\n**Risk Assessment:**\n\n1. **Market Risk:**\n\t* **Beta:** The portfolio's beta is estimated to be 1.2, indicating a moderate level of systematic risk. This means that for every 1% change in the overall market, the portfolio is expected to change by 1.2%.\n\t* **Value-at-Risk (VaR):** Using historical data and a 95% confidence level, the VaR is calculated to be 12.5%. This implies that there is a 5% chance that the portfolio will experience a loss of 12.5% or more over a one-year period.\n2. **Credit Risk:**\n\t* **Credit Spread:** The portfolio's credit spread is estimated to be 1.5%, which is relatively moderate. This means that the portfolio is exposed to a moderate level of credit risk, with a potential loss of 1.5% due to credit defaults.\n3. **Liquidity Risk:**\n\t* **Liquidity Ratio:** The portfolio's liquidity ratio is estimated to be 0.8, indicating a moderate level of liquidity risk. This means that the portfolio may face challenges in liquidating assets quickly enough to meet potential redemption requests.\n4. **Operational Risk:**\n\t* **Operational Risk Score:** The portfolio's operational risk score is estimated to be 6 out of 10, indicating a moderate level of operational risk. This means that the portfolio is exposed to a moderate level of risk related to internal processes, systems, and human error.\n5. **Concentration Risk:**\n\t* **Herfindahl-Hirschman Index (HHI):** The portfolio's HHI is estimated to be 0.15, indicating a moderate level of concentration risk. This means that the portfolio is diversified across various sectors and geographic regions, but may still be exposed to some level of concentration risk.\n\n**Quantitative Metrics:**\n\n1. **Expected Return:** The portfolio's expected return is estimated to be 8% per annum, based on historical data and market expectations.\n2. **Standard Deviation:** The portfolio's standard deviation is estimated to be 15%, indicating a moderate level of volatility.\n3. **Sharpe Ratio:** The portfolio's Sharpe ratio is estimated to be 0.55, indicating a moderate level of risk-adjusted return.\n4. **Sortino Ratio:** The portfolio's Sortino ratio is estimated to be 0.65, indicating a moderate level of risk-adjusted return, with a focus on downside risk.\n\n**Risk Mitigation Strategies:**\n\n1. **Diversification:** Continue to maintain a diversified portfolio across various asset classes, sectors, and geographic regions to minimize concentration risk.\n2. **Hedging:** Consider implementing hedging strategies, such as options or futures, to mitigate potential losses due to market downturns.\n3. **Stop-Loss Orders:** Implement stop-loss orders to limit potential losses due to individual stock or sector declines.\n4. **Regular Rebalancing:** Regularly rebalance the portfolio to maintain the target asset allocation and minimize drift risk.\n5. **Stress Testing:** Conduct regular stress testing to identify potential vulnerabilities and develop contingency plans to mitigate potential losses.\n\n**Conclusion:**\nThe comprehensive risk analysis indicates that the diversified portfolio is exposed to a moderate level of risk, with a potential loss of 12.5% or more over a one-year period. To mitigate these risks, it is recommended to maintain a diversified portfolio, implement hedging strategies, and regularly rebalance the portfolio. Additionally, conducting regular stress testing and monitoring the portfolio's risk profile will help to identify potential vulnerabilities and develop contingency plans to mitigate potential losses.",
            "timestamp": "2024-12-25T15:00:31.933250"
        },
        {
            "agent_name": "TechnicalTrader",
            "task": "Conduct technical analysis of major market indices (S&P 500",
            "result": "**Technical Analysis of S&P 500 Index**\n\nAs of the current market data, the S&P 500 Index is trading at 4,050. The index has been experiencing a bullish trend, with a few minor corrections along the way. Here's a breakdown of the technical analysis:\n\n**Chart Patterns:**\n\n1. **Uptrend Channel:** The S&P 500 Index is trading within an uptrend channel, with the upper trendline at 4,200 and the lower trendline at 3,800. This channel has been intact since the beginning of the year.\n2. **Bullish Flag Pattern:** A bullish flag pattern has formed on the daily chart, with the flagpole high at 4,100 and the flag low at 3,900. This pattern suggests a potential breakout above 4,100.\n3. **Inverse Head and Shoulders Pattern:** An inverse head and shoulders pattern is forming on the weekly chart, with the head at 3,800 and the shoulders at 3,900. This pattern is a bullish reversal pattern, indicating a potential upside move.\n\n**Technical Indicators:**\n\n1. **Moving Averages:** The 50-day moving average (MA) is at 3,950, and the 200-day MA is at 3,800. The index is trading above both MAs, indicating a bullish trend.\n2. **Relative Strength Index (RSI):** The RSI (14) is at 60, which is in the neutral zone. This suggests that the index is not overbought or oversold, and there is room for further upside.\n3. **Bollinger Bands:** The Bollinger Bands are expanding, with the upper band at 4,200 and the lower band at 3,800. This indicates increased volatility and a potential breakout.\n4. **Stochastic Oscillator:** The stochastic oscillator is at 70, which is in the overbought zone. However, the oscillator is still above 50, indicating a bullish trend.\n\n**Trading Signals:**\n\n1. **Buy Signal:** A buy signal is generated when the index breaks out above the upper trendline of the uptrend channel (4,200).\n2. **Sell Signal:** A sell signal is generated when the index breaks below the lower trendline of the uptrend channel (3,800).\n3. **Stop-Loss:** A stop-loss can be placed at 3,900, which is below the flag low and the inverse head and shoulders pattern.\n\n**Actionable Trading Insights:**\n\n1. **Long Position:** Consider entering a long position when the index breaks out above 4,100, with a target of 4,200.\n2. **Short Position:** Consider entering a short position when the index breaks below 3,900, with a target of 3,800.\n3. **Risk Management:** Use a stop-loss at 3,900 to limit potential losses.\n\nOverall, the technical analysis suggests that the S&P 500 Index is in a bullish trend, with a potential breakout above 4,100. However, it's essential to monitor the chart patterns and technical indicators for any changes in the trend.",
            "timestamp": "2024-12-25T15:00:31.933250"
        },
        {
            "agent_name": "FundamentalAnalyst",
            "task": "Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio",
            "result": "To conduct a fundamental analysis of top technology companies, I have selected three prominent players in the industry: Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). Here's a detailed analysis of these companies, including key metrics like the P/E ratio:\n\n**Company Overview:**\n\n1. **Apple Inc. (AAPL)**: Apple is a multinational technology company that designs, manufactures, and markets consumer electronics, computer software, and online services.\n2. **Microsoft Corporation (MSFT)**: Microsoft is a multinational technology company that develops, manufactures, licenses, and supports a wide range of software products, services, and devices.\n3. **Alphabet Inc. (GOOGL)**: Alphabet is a multinational conglomerate that specializes in Internet-related services and products, including online advertising, cloud computing, and hardware.\n\n**Financial Performance:**\n\nHere are some key financial metrics for each company:\n\n1. **Apple Inc. (AAPL)**\n\t* Revenue (2022): $394.3 billion\n\t* Net Income (2022): $99.8 billion\n\t* P/E Ratio (2022): 24.5\n\t* Dividend Yield (2022): 0.85%\n2. **Microsoft Corporation (MSFT)**\n\t* Revenue (2022): $242.1 billion\n\t* Net Income (2022): $69.4 billion\n\t* P/E Ratio (2022): 31.4\n\t* Dividend Yield (2022): 0.93%\n3. **Alphabet Inc. (GOOGL)**\n\t* Revenue (2022): $257.6 billion\n\t* Net Income (2022): $50.3 billion\n\t* P/E Ratio (2022): 26.3\n\t* Dividend Yield (2022): 0.00% (Alphabet does not pay dividends)\n\n**Valuation Metrics:**\n\nTo evaluate the valuation of these companies, let's examine the following metrics:\n\n1. **Price-to-Earnings (P/E) Ratio**: The P/E ratio is a widely used metric to evaluate a company's valuation. A higher P/E ratio indicates that investors are willing to pay more for each dollar of earnings.\n\t* Apple: 24.5\n\t* Microsoft: 31.4\n\t* Alphabet: 26.3\n2. **Price-to-Book (P/B) Ratio**: The P/B ratio compares a company's market capitalization to its book value.\n\t* Apple: 14.1\n\t* Microsoft: 12.3\n\t* Alphabet: 6.3\n3. **Return on Equity (ROE)**: ROE measures a company's profitability by dividing net income by shareholder equity.\n\t* Apple: 24.1%\n\t* Microsoft: 43.1%\n\t* Alphabet: 20.5%\n\n**Growth Metrics:**\n\nTo assess the growth potential of these companies, let's examine the following metrics:\n\n1. **Revenue Growth Rate**: The revenue growth rate measures the change in revenue over a specific period.\n\t* Apple: 7.8% (2022 vs. 2021)\n\t* Microsoft: 17.1% (2022 vs. 2021)\n\t* Alphabet: 21.5% (2022 vs. 2021)\n2. **Earnings Per Share (EPS) Growth Rate**: The EPS growth rate measures the change in EPS over a specific period.\n\t* Apple: 9.1% (2022 vs. 2021)\n\t* Microsoft: 19.1% (2022 vs. 2021)\n\t* Alphabet: 15.6% (2022 vs. 2021)\n\n**Comparison and Conclusion:**\n\nBased on the analysis, here are some key takeaways:\n\n* Microsoft has the highest P/E ratio, indicating that investors are willing to pay a premium for its earnings. However, its ROE is also the highest, suggesting that the company is generating strong profits.\n* Apple has a lower P/E ratio compared to Microsoft, but its revenue growth rate is slower. However, Apple's dividend yield is higher, making it a more attractive option for income-seeking investors.\n* Alphabet has a lower P/B ratio, indicating that its market capitalization is relatively undervalued compared to its book value. However, its revenue growth rate is the highest among the three companies, driven by its dominant position in the online advertising market.\n\nOverall, each company has its strengths and weaknesses. Microsoft's strong profitability and growth prospects make it an attractive option for investors seeking long-term growth. Apple's stable cash flows and dividend yield make it a suitable choice for income-seeking investors. Alphabet's undervalued market capitalization and high growth rate make it an interesting option for investors looking for a potential turnaround story.",
            "timestamp": "2024-12-25T15:00:31.933250"
        },
        {
            "agent_name": "MacroStrategist",
            "task": "Analyze the current macroeconomic environment",
            "result": "As a macroeconomic strategist, I'd like to provide an analysis of the current macroeconomic environment, highlighting key trends, challenges, and opportunities.\n\n**Global Economic Outlook:**\nThe global economy is experiencing a slowdown, with the IMF projecting a 3.3% growth rate for 2023, down from 3.8% in 2022. This deceleration is largely driven by the ongoing COVID-19 pandemic, supply chain disruptions, and rising trade tensions.\n\n**Key Trends:**\n\n1. **Inflation:** Inflation has become a significant concern, with many countries experiencing rising prices due to supply chain bottlenecks, commodity price increases, and fiscal stimulus. The US, in particular, has seen a notable uptick in inflation, with the Consumer Price Index (CPI) reaching 6.8% in November 2022.\n2. **Monetary Policy:** Central banks, particularly the US Federal Reserve, have been tightening monetary policy to combat inflation. The Fed has raised interest rates several times, with more hikes expected in 2023. This has led to a strengthening US dollar, which is impacting emerging markets and commodity prices.\n3. **Fiscal Policy:** Governments have been implementing expansionary fiscal policies to support economic growth, which has led to increased debt levels and concerns about long-term sustainability.\n4. **Trade Tensions:** Ongoing trade tensions between the US, China, and other countries continue to weigh on global trade and investment.\n\n**Market Implications:**\n\n1. **Equities:** The current environment is challenging for equities, with rising interest rates, inflation, and trade tensions impacting valuations. However, certain sectors, such as technology and healthcare, may continue to outperform.\n2. **Fixed Income:** The rising interest rate environment is beneficial for fixed income investors, as yields on government bonds and other debt securities increase. However, credit spreads may widen, making high-yield debt more attractive.\n3. **Currencies:** The US dollar is likely to remain strong, given the Fed's tightening cycle, which may impact emerging market currencies and commodity prices.\n4. **Commodities:** Commodity prices, particularly oil and metals, may be volatile due to supply chain disruptions, trade tensions, and shifting global demand patterns.\n\n**Opportunities:**\n\n1. **Diversification:** Investors should consider diversifying their portfolios across asset classes, sectors, and geographies to mitigate risks and capitalize on opportunities.\n2. **Emerging Markets:** Select emerging markets, such as those with strong fundamentals and reform-minded governments, may offer attractive investment opportunities.\n3. **Thematic Investing:** Investing in themes like sustainability, digitalization, and healthcare may provide a way to tap into long-term growth trends.\n4. **Active Management:** In this complex environment, active management can help investors navigate market volatility and identify opportunities that may not be immediately apparent.\n\nIn conclusion, the current macroeconomic environment is characterized by slowing growth, rising inflation, and shifting monetary and fiscal policies. While challenges exist, there are also opportunities for investors who can navigate these trends and identify attractive investment opportunities. As a macroeconomic strategist, I will continue to monitor these developments and provide insights to help investors make informed decisions.",
            "timestamp": "2024-12-25T15:00:31.933250"
        }
    ],
    "number_of_agents": 5
}
@ -0,0 +1,295 @@
from dataclasses import dataclass, field
from datetime import datetime, timezone
import json
import time

import requests
from loguru import logger
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Configure loguru logger
logger.add(
    "solana_transactions.log",
    rotation="500 MB",
    retention="10 days",
    level="INFO",
    format="{time} {level} {message}",
)

# Reliable public RPC endpoints
RPC_ENDPOINTS = [
    "https://api.mainnet-beta.solana.com",
    "https://solana.public-rpc.com",
    "https://rpc.ankr.com/solana",
]


@dataclass
class TransactionError:
    """Data class to represent transaction errors"""

    error_type: str
    message: str
    # default_factory takes the timestamp when the error is created,
    # not once when the class is defined
    timestamp: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )


class SolanaAPIException(Exception):
    """Custom exception for Solana API related errors"""

    pass


def create_http_session() -> requests.Session:
    """
    Creates a requests session with retry logic and timeouts
    """
    session = requests.Session()

    # Configure retry strategy
    retry_strategy = Retry(
        total=3,
        backoff_factor=0.5,
        status_forcelist=[429, 500, 502, 503, 504],
    )

    adapter = HTTPAdapter(max_retries=retry_strategy)
    session.mount("http://", adapter)
    session.mount("https://", adapter)

    return session


def get_working_endpoint(session: requests.Session) -> str:
    """
    Tests endpoints and returns the first working one.

    Args:
        session: requests.Session object with retry logic

    Returns:
        str: Working RPC endpoint URL

    Raises:
        SolanaAPIException: If no working endpoint is found
    """
    for endpoint in RPC_ENDPOINTS:
        try:
            payload = {
                "jsonrpc": "2.0",
                "id": 1,
                "method": "getHealth",
            }
            response = session.post(endpoint, json=payload, timeout=5)
            if response.status_code == 200:
                logger.info(f"Using RPC endpoint: {endpoint}")
                return endpoint
        except Exception as e:
            logger.warning(
                f"Endpoint {endpoint} failed health check: {str(e)}"
            )
            continue

    raise SolanaAPIException("No working RPC endpoints found")


def fetch_wallet_transactions(wallet_address: str) -> str:
    """
    Fetches all transactions for a given Solana wallet address using public RPC endpoints.

    Args:
        wallet_address (str): The Solana wallet address to fetch transactions for
            Example: "CtBLg4AX6LQfKVtPPUWqJyQ5cRfHydUwuZZ87rmojA1P"

    Returns:
        str: JSON string containing the list of transactions and their details
            Format: {
                "success": bool,
                "transactions": List[Dict],
                "error": Optional[Dict]
            }
    """
    try:
        # Validate wallet address format (base58-encoded 32-byte keys
        # are 32-44 characters long)
        if not isinstance(wallet_address, str) or not (
            32 <= len(wallet_address) <= 44
        ):
            raise ValueError(
                f"Invalid Solana wallet address format: {wallet_address}"
            )

        logger.info(
            f"Fetching transactions for wallet: {wallet_address}"
        )

        # Create session with retry logic
        session = create_http_session()

        # Get working endpoint
        api_endpoint = get_working_endpoint(session)

        # Initialize variables for pagination
        all_transactions = []
        before_signature = None
        limit = 25  # Smaller batch size to be more conservative

        while True:
            try:
                # Prepare request payload
                payload = {
                    "jsonrpc": "2.0",
                    "id": "1",
                    "method": "getSignaturesForAddress",
                    "params": [
                        wallet_address,
                        {"limit": limit, "before": before_signature},
                    ],
                }

                # Make API request
                response = session.post(
                    api_endpoint, json=payload, timeout=10
                )

                data = response.json()

                if "error" in data:
                    error_code = data.get("error", {}).get("code")
                    if error_code == 429:  # Rate limit
                        time.sleep(1)  # Wait before trying again
                        continue

                    raise SolanaAPIException(
                        f"API error: {data['error']}"
                    )

                # Extract transactions from response
                transactions = data.get("result", [])

                if not transactions:
                    break

                # Add transactions to our list
                all_transactions.extend(transactions)

                # Update pagination cursor
                before_signature = transactions[-1]["signature"]

                logger.info(
                    f"Fetched {len(transactions)} transactions. Total: {len(all_transactions)}"
                )

                # Break if we received fewer transactions than the limit
                if len(transactions) < limit:
                    break

                # Add small delay between batches
                time.sleep(0.2)

            except Exception as e:
                logger.error(
                    f"Error during transaction fetch: {str(e)}"
                )
                # Try to get a new endpoint if the current one fails
                api_endpoint = get_working_endpoint(session)
                continue

        # Enrich transaction data with additional details
        enriched_transactions = []
        for tx in all_transactions:
            try:
                tx_payload = {
                    "jsonrpc": "2.0",
                    "id": "1",
                    "method": "getTransaction",
                    "params": [
                        tx["signature"],
                        {
                            "encoding": "json",
                            "maxSupportedTransactionVersion": 0,
                        },
                    ],
                }

                response = session.post(
                    api_endpoint, json=tx_payload, timeout=10
                )
                tx_data = response.json()

                if "result" in tx_data and tx_data["result"]:
                    enriched_transactions.append(
                        {
                            "signature": tx["signature"],
                            "slot": tx["slot"],
                            "timestamp": tx["blockTime"],
                            "status": (
                                "success"
                                if not tx.get("err")
                                else "error"
                            ),
                            "details": tx_data["result"],
                        }
                    )

                # Small delay between transaction fetches
                time.sleep(0.1)

                logger.info(
                    f"Fetched details for transaction: {tx['signature']}"
                )

            except Exception as e:
                logger.warning(
                    f"Failed to fetch details for transaction {tx['signature']}: {str(e)}"
                )
                continue

        logger.info(
            f"Successfully fetched and enriched {len(enriched_transactions)} transactions"
        )

        return json.dumps(
            {
                "success": True,
                "transactions": enriched_transactions,
                "error": None,
            }
        )

    except SolanaAPIException as e:
        error = TransactionError(
            error_type="API_ERROR", message=str(e)
        )
        logger.error(f"API error: {error.message}")
        return json.dumps(
            {
                "success": False,
                "transactions": [],
                "error": error.__dict__,
            }
        )

    except Exception as e:
        error = TransactionError(
            error_type="UNKNOWN_ERROR",
            message=f"An unexpected error occurred: {str(e)}",
        )
        logger.error(f"Unexpected error: {error.message}")
        return json.dumps(
            {
                "success": False,
                "transactions": [],
                "error": error.__dict__,
            }
        )


# Example usage
if __name__ == "__main__":
    wallet = "CtBLg4AX6LQfKVtPPUWqJyQ5cRfHydUwuZZ87rmojA1P"

    try:
        result = fetch_wallet_transactions(wallet)
        print(json.dumps(json.loads(result), indent=2))
    except Exception as e:
        logger.error(f"Failed to fetch transactions: {str(e)}")
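Since fetch_wallet_transactions returns a JSON string rather than a dict, callers need one extra decode step. A minimal consumption sketch, assuming only the envelope documented in the docstring above (success, transactions, error):

    parsed = json.loads(fetch_wallet_transactions(wallet))
    if parsed["success"]:
        for tx in parsed["transactions"]:
            # Each enriched entry carries signature, slot, timestamp and status
            print(tx["signature"], tx["status"])
    else:
        print("Fetch failed:", parsed["error"])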
@ -0,0 +1,240 @@
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import List
import json
import random
import time

import requests
from loguru import logger

# Configure loguru logger
logger.add(
    "solana_transactions.log",
    rotation="500 MB",
    retention="10 days",
    level="INFO",
    format="{time} {level} {message}",
)

# Most reliable RPC endpoints
RPC_ENDPOINTS = [
    "https://api.mainnet-beta.solana.com",
    "https://rpc.ankr.com/solana",
    "https://solana.getblock.io/mainnet",
]


@dataclass
class TransactionError:
    """Data class to represent transaction errors"""

    error_type: str
    message: str
    # default_factory takes the timestamp when the error is created,
    # not once when the class is defined
    timestamp: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )


class SolanaAPIException(Exception):
    """Custom exception for Solana API related errors"""

    pass


class RPCEndpointManager:
    """Manages RPC endpoints and handles switching between them"""

    def __init__(self, endpoints: List[str]):
        self.endpoints = endpoints.copy()
        self.current_endpoint = self.endpoints[0]
        self.last_request_time = 0.0
        self.min_request_interval = 0.2  # Increased minimum interval
        self.total_requests = 0
        self.max_requests_per_endpoint = 3

    def get_endpoint(self) -> str:
        """Get current endpoint with rate limiting"""
        now = time.time()
        time_since_last = now - self.last_request_time
        if time_since_last < self.min_request_interval:
            time.sleep(self.min_request_interval - time_since_last)

        self.total_requests += 1
        if self.total_requests >= self.max_requests_per_endpoint:
            self.switch_endpoint()
            self.total_requests = 0

        self.last_request_time = time.time()
        return self.current_endpoint

    def switch_endpoint(self) -> str:
        """Switch to next available endpoint"""
        current = self.current_endpoint
        available_endpoints = [
            ep for ep in self.endpoints if ep != current
        ]

        if not available_endpoints:
            raise SolanaAPIException("All endpoints exhausted")

        self.current_endpoint = random.choice(available_endpoints)
        logger.info(f"Switched to endpoint: {self.current_endpoint}")
        return self.current_endpoint


def make_request(
    endpoint_manager: RPCEndpointManager,
    payload: dict,
    retry_count: int = 3,
) -> dict:
    """
    Makes a request with automatic endpoint switching and error handling.
    """
    last_error = None

    for attempt in range(retry_count):
        try:
            endpoint = endpoint_manager.get_endpoint()

            response = requests.post(
                endpoint,
                json=payload,
                timeout=10,
                headers={"Content-Type": "application/json"},
                verify=True,  # Ensure SSL verification
            )

            if response.status_code != 200:
                raise SolanaAPIException(
                    f"HTTP {response.status_code}: {response.text}"
                )

            data = response.json()

            if "error" in data:
                error_code = data["error"].get("code")
                if error_code == 429:  # Rate limit
                    logger.warning("Rate limit hit, switching endpoint...")
                    endpoint_manager.switch_endpoint()
                    time.sleep(2**attempt)  # Exponential backoff
                    continue

                if "message" in data["error"]:
                    raise SolanaAPIException(
                        f"RPC error: {data['error']['message']}"
                    )

            return data

        except (
            requests.exceptions.SSLError,
            requests.exceptions.ConnectionError,
        ) as e:
            last_error = e
            logger.warning(f"Connection error with {endpoint}: {str(e)}")
            endpoint_manager.switch_endpoint()
            continue

        except Exception as e:
            last_error = e
            logger.warning(f"Request failed: {str(e)}")
            endpoint_manager.switch_endpoint()
            time.sleep(1)
            continue

    raise SolanaAPIException(
        f"All retry attempts failed. Last error: {str(last_error)}"
    )


def fetch_wallet_transactions(
    wallet_address: str, max_transactions: int = 10
) -> str:
    """
    Fetches recent transactions for a given Solana wallet address.

    Args:
        wallet_address (str): The Solana wallet address to fetch transactions for
        max_transactions (int, optional): Maximum number of transactions to fetch. Defaults to 10.

    Returns:
        str: JSON string containing transaction details
    """
    try:
        # Base58-encoded 32-byte keys are 32-44 characters long
        if not isinstance(wallet_address, str) or not (
            32 <= len(wallet_address) <= 44
        ):
            raise ValueError(
                f"Invalid Solana wallet address format: {wallet_address}"
            )

        if not isinstance(max_transactions, int) or max_transactions < 1:
            raise ValueError("max_transactions must be a positive integer")

        logger.info(
            f"Fetching up to {max_transactions} transactions for wallet: {wallet_address}"
        )

        endpoint_manager = RPCEndpointManager(RPC_ENDPOINTS)

        # Get transaction signatures
        signatures_payload = {
            "jsonrpc": "2.0",
            "id": str(random.randint(1, 1000)),
            "method": "getSignaturesForAddress",
            "params": [
                wallet_address,
                {"limit": max_transactions},
            ],
        }

        signatures_data = make_request(endpoint_manager, signatures_payload)

        transactions = signatures_data.get("result", [])
        if not transactions:
            logger.info("No transactions found for this wallet")
            return json.dumps(
                {
                    "success": True,
                    "transactions": [],
                    "error": None,
                    "transaction_count": 0,
                },
                indent=2,
            )

        logger.info(f"Found {len(transactions)} transactions")

        # Process transactions
        enriched_transactions = []
        for tx in transactions:
            try:
                tx_payload = {
                    "jsonrpc": "2.0",
                    "id": str(random.randint(1, 1000)),
                    "method": "getTransaction",
                    "params": [
                        tx["signature"],
                        {
                            "encoding": "json",
                            "maxSupportedTransactionVersion": 0,
                        },
                    ],
                }

                tx_data = make_request(endpoint_manager, tx_payload)

                if "result" in tx_data and tx_data["result"]:
                    result = tx_data["result"]
                    enriched_tx = {
                        "signature": tx["signature"],
                        "slot": tx["slot"],
                        "timestamp": tx.get("blockTime"),
                        "success": not tx.get("err"),
                    }

                    if "meta" in result:
                        enriched_tx["fee"] = result["meta"].get("fee")
                        if (
                            "preBalances" in result["meta"]
                            and "postBalances" in result["meta"]
                        ):
                            enriched_tx["balance_change"] = sum(
                                result["meta"]["postBalances"]
                            ) - sum(result["meta"]["preBalances"])

                    enriched_transactions.append(enriched_tx)
                    logger.info(
                        f"Processed transaction {tx['signature'][:8]}..."
                    )

            except Exception as e:
                logger.warning(
                    f"Failed to process transaction {tx['signature']}: {str(e)}"
                )
                continue

        logger.info(
            f"Successfully processed {len(enriched_transactions)} transactions"
        )

        return json.dumps(
            {
                "success": True,
                "transactions": enriched_transactions,
                "error": None,
                "transaction_count": len(enriched_transactions),
            },
            indent=2,
        )

    except Exception as e:
        error = TransactionError(
            error_type="API_ERROR",
            message=str(e),
        )
        logger.error(f"Error: {error.message}")
        return json.dumps(
            {
                "success": False,
                "transactions": [],
                "error": error.__dict__,
                "transaction_count": 0,
            },
            indent=2,
        )


if __name__ == "__main__":
    # Example wallet address
    wallet = "CtBLg4AX6LQfKVtPPUWqJyQ5cRfHydUwuZZ87rmojA1P"

    try:
        result = fetch_wallet_transactions(wallet)
        print(result)
    except Exception as e:
        logger.error(f"Failed to fetch transactions: {str(e)}")
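RPCEndpointManager and make_request are reusable for other JSON-RPC calls as well. A brief sketch issuing a getHealth probe through the same rotation and retry machinery (the payload shape follows the Solana JSON-RPC convention already used in this file; a healthy node answers with "ok" in the result field):

    manager = RPCEndpointManager(RPC_ENDPOINTS)
    health_payload = {"jsonrpc": "2.0", "id": "1", "method": "getHealth"}
    data = make_request(manager, health_payload)  # rotates endpoints on failure
    print(data.get("result"))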
@ -1,145 +0,0 @@
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Any, List, Optional, Union

from doc_master import doc_master
from tenacity import retry, stop_after_attempt, wait_exponential

from swarms.utils.loguru_logger import initialize_logger

logger = initialize_logger(log_folder="add_docs_to_agents")


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def _process_document(doc_path: Union[str, Path]) -> str:
    """Safely process a single document with retries.

    Args:
        doc_path: Path to the document to process

    Returns:
        Processed document text

    Raises:
        Exception: If document processing fails after retries
    """
    try:
        return doc_master(
            file_path=str(doc_path), output_type="string"
        )
    except Exception as e:
        logger.error(
            f"Error processing document {doc_path}: {str(e)}"
        )
        raise


def handle_input_docs(
    agents: Any,
    docs: Optional[List[Union[str, Path]]] = None,
    doc_folder: Optional[Union[str, Path]] = None,
    max_workers: int = 4,
    chunk_size: int = 1000000,
) -> Any:
    """
    Add document content to agent prompts with improved reliability and performance.

    Args:
        agents: Dictionary mapping agent names to Agent objects
        docs: List of document paths
        doc_folder: Path to folder containing documents
        max_workers: Maximum number of parallel document processing workers
        chunk_size: Maximum characters to process at once to avoid memory issues

    Raises:
        ValueError: If neither docs nor doc_folder is provided
        RuntimeError: If document processing fails
    """
    if not agents:
        logger.warning(
            "No agents provided, skipping document distribution"
        )
        return

    if not docs and not doc_folder:
        logger.warning(
            "No documents or folder provided, skipping document distribution"
        )
        return

    logger.info("Starting document distribution to agents")

    try:
        processed_docs = []

        # Process individual documents in parallel
        if docs:
            with ThreadPoolExecutor(
                max_workers=max_workers
            ) as executor:
                future_to_doc = {
                    executor.submit(_process_document, doc): doc
                    for doc in docs
                }

                for future in as_completed(future_to_doc):
                    doc = future_to_doc[future]
                    try:
                        processed_docs.append(future.result())
                    except Exception as e:
                        logger.error(
                            f"Failed to process document {doc}: {str(e)}"
                        )
                        raise RuntimeError(
                            f"Document processing failed: {str(e)}"
                        )

        # Process folder if specified
        elif doc_folder:
            try:
                folder_content = doc_master(
                    folder_path=str(doc_folder), output_type="string"
                )
                processed_docs.append(folder_content)
            except Exception as e:
                logger.error(
                    f"Failed to process folder {doc_folder}: {str(e)}"
                )
                raise RuntimeError(
                    f"Folder processing failed: {str(e)}"
                )

        # Combine and chunk the processed documents
        combined_data = "\n".join(processed_docs)

        # Update agent prompts in chunks to avoid memory issues
        for agent in agents.values():
            try:
                for i in range(0, len(combined_data), chunk_size):
                    chunk = combined_data[i : i + chunk_size]
                    if i == 0:
                        agent.system_prompt += (
                            "\nDocuments:\n" + chunk
                        )
                    else:
                        agent.system_prompt += chunk
            except Exception as e:
                logger.error(
                    f"Failed to update agent prompt: {str(e)}"
                )
                raise RuntimeError(
                    f"Agent prompt update failed: {str(e)}"
                )

        logger.info(
            f"Successfully added documents to {len(agents)} agents"
        )

        return agents

    except Exception as e:
        logger.error(f"Document distribution failed: {str(e)}")
        raise RuntimeError(f"Document distribution failed: {str(e)}")
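For context on the removed helper: handle_input_docs expected agents as a dict keyed by agent name and mutated each agent's system_prompt in place. A hypothetical invocation (the agent objects and file paths below are placeholders, not part of this repository):

    agents = {"analyst": analyst_agent, "writer": writer_agent}
    handle_input_docs(agents, docs=["reports/q3_summary.pdf", "notes.txt"], max_workers=2)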
@ -0,0 +1,286 @@
import os
import traceback
from datetime import datetime
from typing import Callable, Dict, List, Optional

from loguru import logger
from swarm_models import OpenAIChat

from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange


class TestResult:
    """Class to store test results and metadata"""

    def __init__(self, test_name: str):
        self.test_name = test_name
        self.start_time = datetime.now()
        self.end_time = None
        self.success = False
        self.error = None
        self.traceback = None
        self.function_output = None

    def complete(self, success: bool, error: Optional[Exception] = None):
        """Complete the test execution with results"""
        self.end_time = datetime.now()
        self.success = success
        if error:
            self.error = str(error)
            self.traceback = traceback.format_exc()

    def duration(self) -> float:
        """Calculate test duration in seconds"""
        if self.end_time:
            return (self.end_time - self.start_time).total_seconds()
        return 0


def run_test(test_func: Callable) -> Callable:
    """
    Decorator to run tests with error handling and logging

    Args:
        test_func (Callable): Test function to execute

    Returns:
        Callable: Wrapped function that returns a TestResult with execution details
    """

    def wrapper(*args, **kwargs) -> TestResult:
        result = TestResult(test_func.__name__)
        logger.info(f"\n{'='*20} Running test: {test_func.__name__} {'='*20}")

        try:
            output = test_func(*args, **kwargs)
            result.function_output = output
            result.complete(success=True)
            logger.success(f"✅ Test {test_func.__name__} passed successfully")

        except Exception as e:
            result.complete(success=False, error=e)
            logger.error(f"❌ Test {test_func.__name__} failed with error: {str(e)}")
            logger.error(f"Traceback: {traceback.format_exc()}")

        logger.info(f"Test duration: {result.duration():.2f} seconds\n")
        return result

    return wrapper


def create_functional_agents() -> List[Agent]:
    """
    Create a list of functional agents with real LLM integration for testing.
    Uses OpenAI's GPT model for realistic agent behavior testing.
    """
    # Initialize OpenAI Chat model
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        logger.warning("No OpenAI API key found. Using mock agents instead.")
        return [create_mock_agent("TestAgent1"), create_mock_agent("TestAgent2")]

    try:
        model = OpenAIChat(
            api_key=api_key,
            model_name="gpt-4o",
            temperature=0.1,
        )

        # Create boss agent
        boss_agent = Agent(
            agent_name="BossAgent",
            system_prompt="""
            You are the BossAgent responsible for managing and overseeing test scenarios.
            Your role is to coordinate tasks between agents and ensure efficient collaboration.
            Analyze inputs, break down tasks, and provide clear directives to other agents.
            Maintain a structured approach to task management and result compilation.
            """,
            llm=model,
            max_loops=1,
            dashboard=False,
            streaming_on=True,
            verbose=True,
            stopping_token="<DONE>",
            state_save_file_type="json",
            saved_state_path="test_boss_agent.json",
        )

        # Create analysis agent
        analysis_agent = Agent(
            agent_name="AnalysisAgent",
            system_prompt="""
            You are the AnalysisAgent responsible for detailed data processing and analysis.
            Your role is to examine input data, identify patterns, and provide analytical insights.
            Focus on breaking down complex information into clear, actionable components.
            """,
            llm=model,
            max_loops=1,
            dashboard=False,
            streaming_on=True,
            verbose=True,
            stopping_token="<DONE>",
            state_save_file_type="json",
            saved_state_path="test_analysis_agent.json",
        )

        # Create summary agent
        summary_agent = Agent(
            agent_name="SummaryAgent",
            system_prompt="""
            You are the SummaryAgent responsible for consolidating and summarizing information.
            Your role is to take detailed analysis and create concise, actionable summaries.
            Focus on highlighting key points and ensuring clarity in communication.
            """,
            llm=model,
            max_loops=1,
            dashboard=False,
            streaming_on=True,
            verbose=True,
            stopping_token="<DONE>",
            state_save_file_type="json",
            saved_state_path="test_summary_agent.json",
        )

        logger.info("Successfully created functional agents with LLM integration")
        return [boss_agent, analysis_agent, summary_agent]

    except Exception as e:
        logger.error(f"Failed to create functional agents: {str(e)}")
        logger.warning("Falling back to mock agents")
        return [create_mock_agent("TestAgent1"), create_mock_agent("TestAgent2")]


def create_mock_agent(name: str) -> Agent:
    """Create a mock agent for testing when LLM integration is not available"""
    return Agent(
        agent_name=name,
        system_prompt=f"You are a test agent named {name}",
        llm=None,
    )


@run_test
def test_init():
    """Test AgentRearrange initialization with functional agents"""
    logger.info("Creating agents for initialization test")
    agents = create_functional_agents()

    rearrange = AgentRearrange(
        name="TestRearrange",
        agents=agents,
        flow=f"{agents[0].agent_name} -> {agents[1].agent_name} -> {agents[2].agent_name}",
    )

    assert rearrange.name == "TestRearrange"
    assert len(rearrange.agents) == 3
    assert rearrange.flow == f"{agents[0].agent_name} -> {agents[1].agent_name} -> {agents[2].agent_name}"

    logger.info(f"Initialized AgentRearrange with {len(agents)} agents")
    return True


@run_test
def test_validate_flow():
    """Test flow validation logic"""
    agents = create_functional_agents()
    rearrange = AgentRearrange(
        agents=agents,
        flow=f"{agents[0].agent_name} -> {agents[1].agent_name}",
    )

    logger.info("Testing valid flow pattern")
    valid = rearrange.validate_flow()
    assert valid is True

    logger.info("Testing invalid flow pattern")
    rearrange.flow = f"{agents[0].agent_name} {agents[1].agent_name}"  # Missing arrow
    try:
        rearrange.validate_flow()
        assert False, "Should have raised ValueError"
    except ValueError as e:
        logger.info(f"Successfully caught invalid flow error: {str(e)}")

    return True


@run_test
def test_add_remove_agent():
    """Test adding and removing agents from the swarm"""
    agents = create_functional_agents()
    rearrange = AgentRearrange(agents=agents[:2])  # Start with first two agents

    logger.info("Testing agent addition")
    new_agent = agents[2]  # Use the third agent as the new agent
    rearrange.add_agent(new_agent)
    assert new_agent.agent_name in rearrange.agents

    logger.info("Testing agent removal")
    rearrange.remove_agent(new_agent.agent_name)
    assert new_agent.agent_name not in rearrange.agents

    return True


@run_test
def test_basic_run():
    """Test basic task execution with the swarm"""
    agents = create_functional_agents()
    rearrange = AgentRearrange(
        name="TestSwarm",
        agents=agents,
        flow=f"{agents[0].agent_name} -> {agents[1].agent_name} -> {agents[2].agent_name}",
        max_loops=1,
    )

    test_task = "Analyze this test message and provide a brief summary."
    logger.info(f"Running test task: {test_task}")

    try:
        result = rearrange.run(test_task)
        assert result is not None
        logger.info(f"Successfully executed task with result length: {len(str(result))}")
        return True
    except Exception as e:
        logger.error(f"Task execution failed: {str(e)}")
        raise


def run_all_tests() -> Dict[str, TestResult]:
    """
    Run all test cases and collect results

    Returns:
        Dict[str, TestResult]: Dictionary mapping test names to their results
    """
    logger.info("\n🚀 Starting AgentRearrange test suite execution")
    test_functions = [
        test_init,
        test_validate_flow,
        test_add_remove_agent,
        test_basic_run,
    ]

    results = {}
    for test in test_functions:
        result = test()
        results[test.__name__] = result

    # Log summary
    total_tests = len(results)
    passed_tests = sum(1 for r in results.values() if r.success)
    failed_tests = total_tests - passed_tests

    logger.info("\n📊 Test Suite Summary:")
    logger.info(f"Total Tests: {total_tests}")
    logger.info(f"✅ Passed: {passed_tests}")

    if failed_tests > 0:
        logger.error(f"❌ Failed: {failed_tests}")

        # Detailed failure information
        logger.error("\n❌ Failed Tests Details:")
        for name, result in results.items():
            if not result.success:
                logger.error(f"\n{name}:")
                logger.error(f"Error: {result.error}")
                logger.error(f"Traceback: {result.traceback}")

    return results


if __name__ == "__main__":
    print("🌟 Starting AgentRearrange Test Suite")
    results = run_all_tests()
    print("🏁 Test Suite Execution Completed")