parent 1899c807eb
commit 9b07f448ae
@@ -0,0 +1,64 @@
from dotenv import load_dotenv

from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.utils.str_to_dict import str_to_dict

load_dotenv()

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_stock_price",
            "description": "Retrieve the current stock price and related information for a specified company.",
            "parameters": {
                "type": "object",
                "properties": {
                    "ticker": {
                        "type": "string",
                        "description": "The stock ticker symbol of the company, e.g. AAPL for Apple Inc.",
                    },
                    "include_history": {
                        "type": "boolean",
                        "description": "Indicates whether to include historical price data along with the current price.",
                    },
                    "time": {
                        "type": "string",
                        "format": "date-time",
                        "description": "Optional parameter to specify the time for which the stock data is requested, in ISO 8601 format.",
                    },
                },
                "required": [
                    "ticker",
                    "include_history",
                    "time",
                ],
            },
        },
    }
]


# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    agent_description="Personal finance advisor agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    max_loops=1,
    tools_list_dictionary=tools,
)

out = agent.run(
    "What is the current stock price for Apple Inc. (AAPL)? Include historical price data.",
)

print(out)
print(type(out))
print(str_to_dict(out))
print(type(str_to_dict(out)))
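
# Note: with tools_list_dictionary set, the agent is expected to return the
# tool-call arguments as a JSON string, which str_to_dict parses into a plain
# dict. Illustrative shape only (hypothetical; exact keys depend on the model):
#   out = '{"ticker": "AAPL", "include_history": true, "time": "2024-01-01T00:00:00Z"}'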
@@ -0,0 +1,461 @@
import asyncio
import concurrent.futures
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Tuple

import aiohttp
from dotenv import load_dotenv
from rich.console import Console

from swarms.agents.reasoning_duo import ReasoningDuo
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.any_to_str import any_to_str
from swarms.utils.formatter import formatter
from swarms.utils.history_output_formatter import (
    history_output_formatter,
)
from swarms.utils.str_to_dict import str_to_dict

console = Console()
load_dotenv()

# Number of worker threads for concurrent operations, based on CPU cores
# (os.cpu_count() can return None, so fall back to 1 core)
MAX_WORKERS = (os.cpu_count() or 1) * 2

###############################################################################
# 1. Search Helpers and Result Formatting
###############################################################################


def format_exa_results(json_data: Dict[str, Any]) -> str:
    """Formats Exa.ai search results into structured text"""
    if "error" in json_data:
        return f"### Error\n{json_data['error']}\n"

    # Accumulate formatted sections in a list and join once at the end
    formatted_text = []

    # Extract search metadata
    search_params = json_data.get("effectiveFilters", {})
    query = search_params.get("query", "General web search")
    formatted_text.append(
        f"### Exa Search Results for: '{query}'\n\n---\n"
    )

    # Process results
    results = json_data.get("results", [])

    if not results:
        formatted_text.append("No results found.\n")
        return "".join(formatted_text)

    def process_result(
        result: Dict[str, Any], index: int
    ) -> List[str]:
        """Process a single result in a thread-safe manner"""
        title = result.get("title", "No title")
        url = result.get("url", result.get("id", "No URL"))
        published_date = result.get("publishedDate", "")

        # Handle highlights efficiently
        highlights = result.get("highlights", [])
        highlight_text = (
            "\n".join(
                (
                    h.get("text", str(h))
                    if isinstance(h, dict)
                    else str(h)
                )
                for h in highlights[:3]
            )
            if highlights
            else "No summary available"
        )

        return [
            f"{index}. **{title}**\n",
            f"   - URL: {url}\n",
            f"   - Published: {published_date.split('T')[0] if published_date else 'Date unknown'}\n",
            f"   - Key Points:\n    {highlight_text}\n\n",
        ]

    # Process results concurrently
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        future_to_result = {
            executor.submit(process_result, result, i + 1): i
            for i, result in enumerate(results)
        }

        # Collect results in order
        processed_results = [None] * len(results)
        for future in as_completed(future_to_result):
            idx = future_to_result[future]
            try:
                processed_results[idx] = future.result()
            except Exception as e:
                console.print(
                    f"[bold red]Error processing result {idx + 1}: {str(e)}[/bold red]"
                )
                processed_results[idx] = [
                    f"Error processing result {idx + 1}: {str(e)}\n"
                ]

    # Extend formatted text with processed results in correct order
    for result_text in processed_results:
        formatted_text.extend(result_text)

    return "".join(formatted_text)


async def _async_exa_search(
    query: str, **kwargs: Any
) -> Dict[str, Any]:
    """Asynchronous helper function for Exa.ai API requests"""
    api_url = "https://api.exa.ai/search"
    headers = {
        "x-api-key": os.getenv("EXA_API_KEY"),
        "Content-Type": "application/json",
    }

    payload = {
        "query": query,
        "useAutoprompt": True,
        "numResults": kwargs.get("num_results", 10),
        "contents": {
            "text": True,
            "highlights": {"numSentences": 2},
        },
        **kwargs,
    }

    try:
        async with aiohttp.ClientSession() as session:
            async with session.post(
                api_url, json=payload, headers=headers
            ) as response:
                if response.status != 200:
                    return {
                        "error": f"HTTP {response.status}: {await response.text()}"
                    }
                return await response.json()
    except Exception as e:
        return {"error": str(e)}


def exa_search(query: str, **kwargs: Any) -> str:
    """Performs web search using Exa.ai API with concurrent processing"""
    try:
        # Run the async search in a dedicated event loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            response_json = loop.run_until_complete(
                _async_exa_search(query, **kwargs)
            )
        finally:
            loop.close()

        # Format the results (uses a thread pool internally)
        formatted_text = format_exa_results(response_json)

        return formatted_text

    except Exception as e:
        error_msg = f"Unexpected error: {str(e)}"
        console.print(f"[bold red]{error_msg}[/bold red]")
        return error_msg
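

# Usage sketch (assumes EXA_API_KEY is set in the environment; the query and
# num_results below are illustrative):
#   print(exa_search("state of quantum error correction", num_results=3))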

# Define the research tools schema
tools = [
    {
        "type": "function",
        "function": {
            "name": "search_topic",
            "description": "Conduct an in-depth search on a specified topic or subtopic, generating a comprehensive array of highly detailed search queries tailored to the input parameters.",
            "parameters": {
                "type": "object",
                "properties": {
                    "depth": {
                        "type": "integer",
                        "description": "Indicates the level of thoroughness for the search. Values range from 1 to 3, where 1 represents a superficial search and 3 signifies an exhaustive exploration of the topic.",
                    },
                    "detailed_queries": {
                        "type": "array",
                        "description": "An array of highly specific search queries that are generated based on the input query and the specified depth. Each query should be designed to elicit detailed and relevant information from various sources.",
                        "items": {
                            "type": "string",
                            "description": "Each item in this array should represent a unique search query that targets a specific aspect of the main topic, ensuring a comprehensive exploration of the subject matter.",
                        },
                    },
                },
                "required": ["depth", "detailed_queries"],
            },
        },
    },
]

RESEARCH_AGENT_PROMPT = """
You are an advanced research agent specialized in conducting deep, comprehensive research across multiple domains.
Your task is to:

1. Break down complex topics into searchable subtopics
2. Generate diverse search queries to explore each subtopic thoroughly
3. Identify connections and patterns across different areas of research
4. Synthesize findings into coherent insights
5. Identify gaps in current knowledge and suggest areas for further investigation

For each research task:
- Consider multiple perspectives and approaches
- Look for both supporting and contradicting evidence
- Evaluate the credibility and relevance of sources
- Track emerging trends and recent developments
- Consider cross-disciplinary implications

Output Format:
- Provide structured research plans
- Include specific search queries for each subtopic
- Prioritize queries based on relevance and potential impact
- Suggest follow-up areas for deeper investigation
"""

SUMMARIZATION_AGENT_PROMPT = """
You are an expert information synthesis and summarization agent designed for producing clear, accurate, and insightful summaries of complex information.

Core Capabilities:
- Identify and extract key concepts, themes, and insights from any given content
- Recognize patterns, relationships, and hierarchies within information
- Filter out noise while preserving crucial context and nuance
- Handle multiple sources and perspectives simultaneously

Summarization Strategy:
1. Multi-level Structure
- Provide an extensive summary
- Follow with key findings
- Include detailed insights with supporting evidence
- End with implications or next steps when relevant

2. Quality Standards
- Maintain factual accuracy and precision
- Preserve important technical details and terminology
- Avoid oversimplification of complex concepts
- Include quantitative data when available
- Cite or reference specific sources when summarizing claims

3. Clarity & Accessibility
- Use clear, concise language
- Define technical terms when necessary
- Structure information logically
- Use formatting to enhance readability
- Maintain appropriate level of technical depth for the audience

4. Synthesis & Analysis
- Identify conflicting information or viewpoints
- Highlight consensus across sources
- Note gaps or limitations in the information
- Draw connections between related concepts
- Provide context for better understanding

OUTPUT REQUIREMENTS:
- Begin with a clear statement of the topic or question being addressed
- Use consistent formatting and structure
- Clearly separate different levels of detail
- Include confidence levels for conclusions when appropriate
- Note any areas requiring additional research or clarification

Remember: Your goal is to make complex information accessible while maintaining accuracy and depth. Prioritize clarity without sacrificing important nuance or detail."""


# Initialize the research agent
research_agent = Agent(
    agent_name="Deep-Research-Agent",
    agent_description="Specialized agent for conducting comprehensive research across multiple domains",
    system_prompt=RESEARCH_AGENT_PROMPT,
    max_loops=1,  # Single pass; increase for more thorough research
    tools_list_dictionary=tools,
)


reasoning_duo = ReasoningDuo(
    system_prompt=SUMMARIZATION_AGENT_PROMPT, output_type="string"
)


class DeepResearchSwarm:
    def __init__(
        self,
        name: str = "DeepResearchSwarm",
        description: str = "A swarm that conducts comprehensive research across multiple domains",
        research_agent: Agent = research_agent,
        max_loops: int = 1,
        nice_print: bool = True,
        output_type: str = "json",
        max_workers: int = MAX_WORKERS,  # Reuse the module-level optimal thread count
        token_count: bool = False,
    ):
        self.name = name
        self.description = description
        self.research_agent = research_agent
        self.max_loops = max_loops
        self.nice_print = nice_print
        self.output_type = output_type
        self.max_workers = max_workers

        self.reliability_check()
        self.conversation = Conversation(token_count=token_count)

        # Create a persistent ThreadPoolExecutor for the lifetime of the swarm
        # This eliminates thread creation overhead on each query
        self.executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=self.max_workers
        )

    def __del__(self):
        """Clean up the executor on object destruction"""
        self.executor.shutdown(wait=False)

    def reliability_check(self):
        """Validate the swarm configuration before running"""
        if self.max_loops < 1:
            raise ValueError("max_loops must be greater than 0")

        formatter.print_panel(
            "DeepResearchSwarm is booting up...", "blue"
        )
        formatter.print_panel("Reliability check passed", "green")

    def get_queries(self, query: str) -> List[str]:
        """
        Generate a list of detailed search queries based on the input query.

        Args:
            query (str): The main research query to explore

        Returns:
            List[str]: A list of detailed search queries
        """
        self.conversation.add(role="User", content=query)

        # Get the agent's response
        agent_output = self.research_agent.run(query)

        self.conversation.add(
            role=self.research_agent.agent_name, content=agent_output
        )

        # Convert the string output to a dictionary
        output_dict = str_to_dict(agent_output)

        # Print the conversation history
        if self.nice_print:
            to_do_list = any_to_str(output_dict)
            formatter.print_panel(to_do_list, "blue")

        # Extract the detailed queries from the output
        if (
            isinstance(output_dict, dict)
            and "detailed_queries" in output_dict
        ):
            queries = output_dict["detailed_queries"]
            formatter.print_panel(
                f"Generated {len(queries)} queries", "blue"
            )
            return queries

        return []
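
    # get_queries expects the research agent to emit a JSON string matching
    # the search_topic schema above; an illustrative (hypothetical) payload:
    #   {"depth": 2, "detailed_queries": ["query one", "query two"]}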

    def _process_query(self, query: str) -> Tuple[str, str]:
        """
        Process a single query with search and reasoning.
        This function is designed to be run in a separate thread.

        Args:
            query (str): The query to process

        Returns:
            Tuple[str, str]: A tuple containing (search_results, reasoning_output)
        """
        # Run the search
        results = exa_search(query)

        # Run the reasoning on the search results
        reasoning_output = reasoning_duo.run(results)

        return (results, reasoning_output)

    def step(self, query: str):
        """
        Execute a single research step with maximum parallelism.

        Args:
            query (str): The research query to process

        Returns:
            Formatted conversation history
        """
        # Get all the queries to process
        queries = self.get_queries(query)

        # Submit all queries for concurrent processing
        # Using a list instead of a generator for clearer debugging
        futures = []
        for q in queries:
            future = self.executor.submit(self._process_query, q)
            futures.append((q, future))

        # Collect results in submission order; each result() call blocks
        # until that particular query has finished
        for q, future in futures:
            try:
                results, reasoning_output = future.result()

                # Add search results to conversation
                self.conversation.add(
                    role="User",
                    content=f"Search results for {q}: \n {results}",
                )

                # Add reasoning output to conversation
                self.conversation.add(
                    role=reasoning_duo.agent_name,
                    content=reasoning_output,
                )
            except Exception as e:
                # Handle any errors raised in the worker thread
                self.conversation.add(
                    role="System",
                    content=f"Error processing query '{q}': {str(e)}",
                )

        # Once all query processing is complete, generate the final summary
        # This step runs after all queries to ensure it summarizes all results
        final_summary = reasoning_duo.run(
            f"Generate an extensive report of the following content: {self.conversation.get_str()}"
        )

        self.conversation.add(
            role=reasoning_duo.agent_name,
            content=final_summary,
        )

        return history_output_formatter(
            self.conversation, type=self.output_type
        )


# # Example usage
# if __name__ == "__main__":
#     swarm = DeepResearchSwarm(
#         output_type="json",
#     )
#     print(
#         swarm.step(
#             "What is the active tariff situation with Mexico? Only create 2 queries"
#         )
#     )
@@ -0,0 +1,27 @@
import json
from typing import Dict


def str_to_dict(s: str, retries: int = 3) -> Dict:
    """
    Converts a JSON string to a dictionary.

    Args:
        s (str): The JSON string to be converted.
        retries (int): The number of times to retry parsing the string in case of a JSONDecodeError. Default is 3.

    Returns:
        Dict: The parsed dictionary from the JSON string.

    Raises:
        json.JSONDecodeError: If the string cannot be parsed into a dictionary after the specified number of retries.
    """
    for attempt in range(retries):
        try:
            # Run json.loads directly since it's fast enough
            return json.loads(s)
        except json.JSONDecodeError as e:
            if attempt < retries - 1:
                continue  # Retry on failure
            else:
                raise e  # Raise the error if all retries fail
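

# Usage sketch (hypothetical input):
#   str_to_dict('{"depth": 2, "detailed_queries": ["a", "b"]}')
#   -> {'depth': 2, 'detailed_queries': ['a', 'b']}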
@@ -0,0 +1,63 @@
# 🚀 Latest Updates to Swarms - Twitter Thread

🧵 Thread: Exciting new features in Swarms - the powerful AI agent framework! #AI #AGI #Development

1/9 🤖 Introducing the ReasoningDuo - a revolutionary dual-agent system that combines reasoning and execution agents for more robust and reliable outputs! Perfect for complex problem-solving tasks.

2/9 🔄 New Self-Consistency Agent with parallel processing (see the sketch at the end of this doc):
- Generates multiple independent responses
- Uses ThreadPoolExecutor for concurrent execution
- Aggregates results for higher accuracy
#AI #ParallelProcessing

3/9 🎯 The ReasoningAgentRouter is here! Dynamically select and execute different reasoning strategies:
- ReasoningDuo
- Self-Consistency
- Iterative Reflective Expansion (IRE)
#AIAgents

4/9 💡 Advanced Reasoning Capabilities:
- Structured problem analysis
- Multiple solution exploration
- Bias detection and transparency
- Error handling strategies
#ArtificialIntelligence

5/9 ⚡️ Performance Improvements:
- Concurrent response generation
- Batched task processing
- Optimized thread management
- Improved error handling
#Performance

6/9 🎛️ Customization Options:
- Adjustable sample sizes
- Flexible model selection
- Customizable system prompts
- Multiple output formats
#Flexibility

7/9 🔍 Enhanced Evaluation Features:
- Response validation
- Answer checking
- Majority voting system
- Comprehensive logging
#QualityAssurance

8/9 📊 New Output Types:
- Dictionary format
- List format
- Conversation history
- Structured analysis
#DataScience

9/9 🌟 Coming Soon:
- More agent types
- Enhanced routing strategies
- Advanced aggregation methods
- Expanded model support
Stay tuned! #FutureOfAI

---
Follow for more updates on Swarms! 🚀
#AI #MachineLearning #AGI #Development
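
A minimal sketch of the parallel self-consistency pattern from 2/9, assuming a generic `generate(prompt)` completion callable (hypothetical; the actual swarms SelfConsistencyAgent API may differ) and simple majority voting:

```python
from collections import Counter
from concurrent.futures import ThreadPoolExecutor


def self_consistent_answer(prompt, generate, samples=5):
    # Generate several independent responses concurrently.
    with ThreadPoolExecutor(max_workers=samples) as executor:
        responses = list(
            executor.map(lambda _: generate(prompt), range(samples))
        )
    # Aggregate by majority vote for higher accuracy.
    return Counter(responses).most_common(1)[0][0]
```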
@@ -0,0 +1,70 @@
from litellm import completion
from dotenv import load_dotenv

load_dotenv()

## [OPTIONAL] REGISTER MODEL - not all ollama models support function calling; litellm defaults to JSON-mode tool calls if native tool calling is not supported.

# litellm.register_model(model_cost={
#     "ollama_chat/llama3.1": {
#         "supports_function_calling": True
#     },
# })

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Retrieve detailed current weather information for a specified location, including temperature, humidity, wind speed, and atmospheric conditions.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA, or a specific geographic coordinate in the format 'latitude,longitude'.",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit", "kelvin"],
                        "description": "The unit of temperature measurement to be used in the response.",
                    },
                    "include_forecast": {
                        "type": "boolean",
                        "description": "Indicates whether to include a short-term weather forecast along with the current conditions.",
                    },
                    "time": {
                        "type": "string",
                        "format": "date-time",
                        "description": "Optional parameter to specify the time for which the weather data is requested, in ISO 8601 format.",
                    },
                },
                "required": [
                    "location",
                    "unit",
                    "include_forecast",
                    "time",
                ],
            },
        },
    }
]

messages = [
    {
        "role": "user",
        "content": "What's the weather like in Boston today?",
    }
]


response = completion(
    model="gpt-4o-mini",
    messages=messages,
    tools=tools,
    tool_choice="auto",
    parallel_tool_calls=True,
)

print(response.choices[0].message.tool_calls[0].function.arguments)
print(response.choices[0].message)
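
# The model should respond with a get_current_weather tool call; the printed
# arguments are a JSON string, roughly (illustrative only, content may vary):
#   {"location": "Boston, MA", "unit": "fahrenheit", "include_forecast": false, "time": "..."}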
@@ -0,0 +1,229 @@
import asyncio
import sys

from loguru import logger

from swarms.utils.litellm_wrapper import LiteLLM

# Configure loguru logger
logger.remove()  # Remove default handler
logger.add(
    "test_litellm.log",
    rotation="1 MB",
    format="{time} | {level} | {message}",
    level="DEBUG",
)
logger.add(sys.stdout, level="INFO")


tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Retrieve detailed current weather information for a specified location, including temperature, humidity, wind speed, and atmospheric conditions.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA, or a specific geographic coordinate in the format 'latitude,longitude'.",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit", "kelvin"],
                        "description": "The unit of temperature measurement to be used in the response.",
                    },
                    "include_forecast": {
                        "type": "boolean",
                        "description": "Indicates whether to include a short-term weather forecast along with the current conditions.",
                    },
                    "time": {
                        "type": "string",
                        "format": "date-time",
                        "description": "Optional parameter to specify the time for which the weather data is requested, in ISO 8601 format.",
                    },
                },
                "required": [
                    "location",
                    "unit",
                    "include_forecast",
                    "time",
                ],
            },
        },
    }
]

# Initialize LiteLLM with the weather tool schema attached
llm = LiteLLM(model_name="gpt-4o-mini", tools_list_dictionary=tools)


async def main():
    # arun may return a stream of chunks rather than a response object with
    # .choices, so inspect the raw return value directly
    stream = await llm.arun("What is the weather in San Francisco?")

    logger.info(f"Received stream from LLM. {stream}")

    if stream is not None:
        logger.info(f"Stream is not None. {stream}")
    else:
        logger.info("Stream is None.")


def run_test_suite():
    """Run all test cases and generate a comprehensive report."""
    logger.info("Starting LiteLLM Test Suite")
    total_tests = 0
    passed_tests = 0
    failed_tests = []

    def log_test_result(test_name: str, passed: bool, error=None):
        nonlocal total_tests, passed_tests
        total_tests += 1
        if passed:
            passed_tests += 1
            logger.success(f"✅ {test_name} - PASSED")
        else:
            failed_tests.append((test_name, error))
            logger.error(f"❌ {test_name} - FAILED: {error}")

    # Test 1: Basic Initialization
    try:
        logger.info("Testing basic initialization")
        llm = LiteLLM()
        assert llm.model_name == "gpt-4o"
        assert llm.temperature == 0.5
        assert llm.max_tokens == 4000
        log_test_result("Basic Initialization", True)
    except Exception as e:
        log_test_result("Basic Initialization", False, str(e))

    # Test 2: Custom Parameters
    try:
        logger.info("Testing custom parameters")
        llm = LiteLLM(
            model_name="gpt-3.5-turbo",
            temperature=0.7,
            max_tokens=2000,
            system_prompt="You are a helpful assistant",
        )
        assert llm.model_name == "gpt-3.5-turbo"
        assert llm.temperature == 0.7
        assert llm.max_tokens == 2000
        assert llm.system_prompt == "You are a helpful assistant"
        log_test_result("Custom Parameters", True)
    except Exception as e:
        log_test_result("Custom Parameters", False, str(e))

    # Test 3: Message Preparation
    try:
        logger.info("Testing message preparation")
        llm = LiteLLM(system_prompt="Test system prompt")
        messages = llm._prepare_messages("Test task")
        assert len(messages) == 2
        assert messages[0]["role"] == "system"
        assert messages[0]["content"] == "Test system prompt"
        assert messages[1]["role"] == "user"
        assert messages[1]["content"] == "Test task"
        log_test_result("Message Preparation", True)
    except Exception as e:
        log_test_result("Message Preparation", False, str(e))

    # Test 4: Basic Completion
    try:
        logger.info("Testing basic completion")
        llm = LiteLLM()
        response = llm.run("What is 2+2?")
        assert isinstance(response, str)
        assert len(response) > 0
        log_test_result("Basic Completion", True)
    except Exception as e:
        log_test_result("Basic Completion", False, str(e))

    # Test 5: Tool Usage (via the async main() helper above); record the
    # result in both the pass and fail cases
    try:
        asyncio.run(main())
        log_test_result("Tool Usage", True)
    except Exception as e:
        log_test_result("Tool Usage", False, str(e))

    # Test 6: Tool Calling Setup
    try:
        logger.info("Testing tool calling")
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "test_function",
                    "description": "A test function",
                    "parameters": {
                        "type": "object",
                        "properties": {"test": {"type": "string"}},
                    },
                },
            }
        ]
        llm = LiteLLM(
            tools_list_dictionary=tools,
            tool_choice="auto",
            model_name="gpt-4o-mini",
        )
        assert llm.tools_list_dictionary == tools
        assert llm.tool_choice == "auto"
        log_test_result("Tool Calling Setup", True)
    except Exception as e:
        log_test_result("Tool Calling Setup", False, str(e))

    # Test 7: Async Completion
    async def test_async():
        try:
            logger.info("Testing async completion")
            llm = LiteLLM()
            response = await llm.arun("What is 3+3?")
            assert isinstance(response, str)
            assert len(response) > 0
            log_test_result("Async Completion", True)
        except Exception as e:
            log_test_result("Async Completion", False, str(e))

    asyncio.run(test_async())

    # Test 8: Batched Run
    try:
        logger.info("Testing batched run")
        llm = LiteLLM()
        tasks = ["Task 1", "Task 2", "Task 3"]
        responses = llm.batched_run(tasks, batch_size=2)
        assert isinstance(responses, list)
        assert len(responses) == 3
        log_test_result("Batched Run", True)
    except Exception as e:
        log_test_result("Batched Run", False, str(e))

    # Generate test report
    success_rate = (passed_tests / total_tests) * 100
    logger.info("\n=== Test Suite Report ===")
    logger.info(f"Total Tests: {total_tests}")
    logger.info(f"Passed Tests: {passed_tests}")
    logger.info(f"Failed Tests: {len(failed_tests)}")
    logger.info(f"Success Rate: {success_rate:.2f}%")

    if failed_tests:
        logger.error("\nFailed Tests Details:")
        for test_name, error in failed_tests:
            logger.error(f"{test_name}: {error}")

    return {
        "total_tests": total_tests,
        "passed_tests": passed_tests,
        "failed_tests": failed_tests,
        "success_rate": success_rate,
    }


if __name__ == "__main__":
    test_results = run_test_suite()
    logger.info(
        "Test suite completed. Check test_litellm.log for detailed logs."
    )
@@ -0,0 +1,142 @@
import os
import requests
from dotenv import load_dotenv
import json

load_dotenv()

# Retrieve API key securely from .env
API_KEY = os.getenv("SWARMS_API_KEY")
BASE_URL = "https://api.swarms.world"

# Headers for secure API communication
headers = {"x-api-key": API_KEY, "Content-Type": "application/json"}


def create_medical_swarm(patient_case: str):
    """
    Constructs and triggers a full-stack medical swarm consisting of three agents:
    Diagnostic Specialist, Medical Coder, and Treatment Advisor.
    Each agent is provided with a comprehensive, detailed system prompt to ensure high reliability.
    """

    # Payload includes the patient case as the task to be processed by the swarm
    payload = {
        "swarm_name": "Enhanced Medical Diagnostic Swarm",
        "description": "A swarm of agents specialized in performing comprehensive medical diagnostics, analysis, and coding.",
        "agents": [
            {
                "agent_name": "Diagnostic Specialist",
                "description": "Agent specialized in analyzing patient history, symptoms, lab results, and imaging data to produce accurate diagnoses.",
                "system_prompt": (
                    "You are an experienced, board-certified medical diagnostician with over 20 years of clinical practice. "
                    "Your role is to analyze all available patient information—including history, symptoms, lab tests, and imaging results—"
                    "with extreme attention to detail and clinical nuance. Provide a comprehensive differential diagnosis considering "
                    "common, uncommon, and rare conditions. Always cross-reference clinical guidelines and evidence-based medicine. "
                    "Explain your reasoning step by step and provide a final prioritized list of potential diagnoses along with their likelihood. "
                    "Consider patient demographics, comorbidities, and risk factors. Your diagnosis should be reliable, clear, and actionable."
                ),
                "model_name": "openai/gpt-4o",
                "role": "worker",
                "max_loops": 2,
                "max_tokens": 4000,
                "temperature": 0.3,
                "auto_generate_prompt": False,
                "tools_dictionary": [
                    {
                        "type": "function",
                        "function": {
                            "name": "search_topic",
                            "description": "Conduct an in-depth search on a specified topic or subtopic, generating a comprehensive array of highly detailed search queries tailored to the input parameters.",
                            "parameters": {
                                "type": "object",
                                "properties": {
                                    "depth": {
                                        "type": "integer",
                                        "description": "Indicates the level of thoroughness for the search. Values range from 1 to 3, where 1 represents a superficial search and 3 signifies an exhaustive exploration of the topic.",
                                    },
                                    "detailed_queries": {
                                        "type": "array",
                                        "description": "An array of highly specific search queries that are generated based on the input query and the specified depth. Each query should be designed to elicit detailed and relevant information from various sources.",
                                        "items": {
                                            "type": "string",
                                            "description": "Each item in this array should represent a unique search query that targets a specific aspect of the main topic, ensuring a comprehensive exploration of the subject matter.",
                                        },
                                    },
                                },
                                "required": [
                                    "depth",
                                    "detailed_queries",
                                ],
                            },
                        },
                    },
                ],
            },
            {
                "agent_name": "Medical Coder",
                "description": "Agent responsible for translating medical diagnoses and procedures into accurate standardized medical codes (ICD-10, CPT, etc.).",
                "system_prompt": (
                    "You are a certified and experienced medical coder, well-versed in ICD-10, CPT, and other coding systems. "
                    "Your task is to convert detailed medical diagnoses and treatment procedures into precise, standardized codes. "
                    "Consider all aspects of the clinical documentation including severity, complications, and comorbidities. "
                    "Provide clear explanations for the codes chosen, referencing the latest coding guidelines and payer policies where relevant. "
                    "Your output should be comprehensive, reliable, and fully compliant with current medical coding standards."
                ),
                "model_name": "openai/gpt-4o",
                "role": "worker",
                "max_loops": 1,
                "max_tokens": 3000,
                "temperature": 0.2,
                "auto_generate_prompt": False,
                "tools_dictionary": [],
            },
            {
                "agent_name": "Treatment Advisor",
                "description": "Agent dedicated to suggesting evidence-based treatment options, including pharmaceutical and non-pharmaceutical interventions.",
                "system_prompt": (
                    "You are a highly knowledgeable medical treatment specialist with expertise in the latest clinical guidelines and research. "
                    "Based on the diagnostic conclusions provided, your task is to recommend a comprehensive treatment plan. "
                    "Your suggestions should include first-line therapies, potential alternative treatments, and considerations for patient-specific factors "
                    "such as allergies, contraindications, and comorbidities. Explain the rationale behind each treatment option and reference clinical guidelines where applicable. "
                    "Your recommendations should be reliable, detailed, and clearly prioritized based on efficacy and safety."
                ),
                "model_name": "openai/gpt-4o",
                "role": "worker",
                "max_loops": 1,
                "max_tokens": 5000,
                "temperature": 0.3,
                "auto_generate_prompt": False,
                "tools_dictionary": [],
            },
        ],
        "max_loops": 3,
        "swarm_type": "SequentialWorkflow",
        "task": patient_case,
    }

    response = requests.post(
        f"{BASE_URL}/v1/swarm/completions",
        headers=headers,
        json=payload,
    )

    if response.status_code == 200:
        print("Swarm successfully executed!")
        return json.dumps(response.json(), indent=4)
    else:
        print(f"Error {response.status_code}: {response.text}")
        return None


# Example patient task for the swarm to diagnose and analyze
if __name__ == "__main__":
    patient_case = (
        "Patient is a 55-year-old male presenting with severe chest pain, shortness of breath, elevated blood pressure, "
        "nausea, and a family history of cardiovascular disease. Blood tests show elevated troponin levels, and EKG indicates ST-segment elevations. "
        "The patient is currently unstable. Provide a detailed diagnosis, coding, and treatment plan."
    )

    diagnostic_output = create_medical_swarm(patient_case)
    print(diagnostic_output)
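
# Note: with swarm_type "SequentialWorkflow", the agents are expected to run
# in the order listed above (diagnosis -> coding -> treatment), each receiving
# the prior agent's output as context.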