Add files via upload

pull/994/head
Committed by CI-DEV via GitHub, 2 months ago
parent a38e21e05b
commit 3ce827b0b3

@@ -0,0 +1,414 @@
"""
GraphWorkflow API Examples
This file demonstrates how to call the Swarms API with correctly formatted payloads
and the cheapest available models for real-world GraphWorkflow-style scenarios.
"""
import os
import requests
import json
from typing import Dict, List
from datetime import datetime
# API Configuration - Get API key from environment variable
API_KEY = os.getenv("SWARMS_API_KEY")
if not API_KEY:
print("⚠️ Warning: SWARMS_API_KEY environment variable not set.")
print(" Please set your API key: export SWARMS_API_KEY='your-api-key-here'")
print(" Or set it in your environment variables.")
API_KEY = "your-api-key-here" # Placeholder for demonstration
BASE_URL = "https://api.swarms.world"
headers = {
"x-api-key": API_KEY,
"Content-Type": "application/json"
}
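# Every request below authenticates with this x-api-key header and sends a JSON body.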
class SwarmsAPIExamples:
"""Examples of using Swarms API for GraphWorkflow scenarios."""
def __init__(self):
"""Initialize API examples."""
self.results = {}
def health_check(self):
"""Check API health."""
try:
response = requests.get(f"{BASE_URL}/health", headers=headers)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Health check failed: {e}")
return None
def run_single_agent(self, task: str, agent_name: str = "Research Analyst"):
"""Run a single agent with the cheapest model."""
payload = {
"agent_config": {
"agent_name": agent_name,
"description": "An expert agent for various tasks",
"system_prompt": (
"You are an expert assistant. Provide clear, concise, and accurate responses "
"to the given task. Focus on practical solutions and actionable insights."
),
"model_name": "gpt-4o-mini", # Cheapest model
"role": "worker",
"max_loops": 1,
"max_tokens": 4096, # Reduced for cost
"temperature": 0.7,
"auto_generate_prompt": False,
"tools_list_dictionary": None,
},
"task": task,
}
try:
response = requests.post(
f"{BASE_URL}/v1/agent/completions",
headers=headers,
json=payload
)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Single agent request failed: {e}")
return None
def run_sequential_swarm(self, task: str, agents: List[Dict[str, str]]):
"""Run a sequential swarm with multiple agents."""
payload = {
"name": "Sequential Workflow",
"description": "Multi-agent sequential workflow",
"agents": [
{
"agent_name": agent["name"],
"description": agent["description"],
"system_prompt": agent["system_prompt"],
"model_name": "gpt-4o-mini", # Cheapest model
"role": "worker",
"max_loops": 1,
"max_tokens": 4096, # Reduced for cost
"temperature": 0.7,
"auto_generate_prompt": False
}
for agent in agents
],
"max_loops": 1,
"swarm_type": "SequentialWorkflow",
"task": task
}
try:
response = requests.post(
f"{BASE_URL}/v1/swarm/completions",
headers=headers,
json=payload
)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Sequential swarm request failed: {e}")
return None
def run_concurrent_swarm(self, task: str, agents: List[Dict[str, str]]):
"""Run a concurrent swarm with multiple agents."""
payload = {
"name": "Concurrent Workflow",
"description": "Multi-agent concurrent workflow",
"agents": [
{
"agent_name": agent["name"],
"description": agent["description"],
"system_prompt": agent["system_prompt"],
"model_name": "gpt-4o-mini", # Cheapest model
"role": "worker",
"max_loops": 1,
"max_tokens": 4096, # Reduced for cost
"temperature": 0.7,
"auto_generate_prompt": False
}
for agent in agents
],
"max_loops": 1,
"swarm_type": "ConcurrentWorkflow",
"task": task
}
try:
response = requests.post(
f"{BASE_URL}/v1/swarm/completions",
headers=headers,
json=payload
)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Concurrent swarm request failed: {e}")
return None
def example_software_development_pipeline(self):
"""Example: Software Development Pipeline using Swarms API."""
print("\n🔧 Example: Software Development Pipeline")
print("-" * 50)
# Define agents for software development
agents = [
{
"name": "CodeGenerator",
"description": "Generates clean, well-documented code",
"system_prompt": "You are an expert Python developer. Generate clean, well-documented code with proper error handling and documentation."
},
{
"name": "CodeReviewer",
"description": "Reviews code for bugs and best practices",
"system_prompt": "You are a senior code reviewer. Check for bugs, security issues, and best practices. Provide specific feedback and suggestions."
},
{
"name": "TestGenerator",
"description": "Generates comprehensive unit tests",
"system_prompt": "You are a QA engineer. Generate comprehensive unit tests for the given code with good coverage and edge cases."
}
]
task = "Create a Python function that implements a binary search algorithm with proper error handling and documentation"
result = self.run_sequential_swarm(task, agents)
if result:
print("✅ Software Development Pipeline completed successfully")
# Debug: Print the full response structure
print(f"🔍 Response keys: {list(result.keys()) if isinstance(result, dict) else 'Not a dict'}")
# Try different possible result keys
result_text = (
result.get('result') or
result.get('response') or
result.get('content') or
result.get('output') or
result.get('data') or
str(result)[:200]
)
print(f"📝 Result: {result_text[:200] if result_text else 'No result'}...")
else:
print("❌ Software Development Pipeline failed")
return result
def example_data_analysis_pipeline(self):
"""Example: Data Analysis Pipeline using Swarms API."""
print("\n📊 Example: Data Analysis Pipeline")
print("-" * 50)
# Define agents for data analysis
agents = [
{
"name": "DataExplorer",
"description": "Explores and analyzes data patterns",
"system_prompt": "You are a data scientist. Analyze the given data, identify patterns, trends, and key insights. Provide clear explanations."
},
{
"name": "StatisticalAnalyst",
"description": "Performs statistical analysis",
"system_prompt": "You are a statistical analyst. Perform statistical analysis on the data, identify correlations, and provide statistical insights."
},
{
"name": "ReportWriter",
"description": "Creates comprehensive reports",
"system_prompt": "You are a report writer. Create comprehensive, well-structured reports based on the analysis. Include executive summaries and actionable recommendations."
}
]
task = "Analyze this customer transaction data and provide insights on purchasing patterns, customer segments, and recommendations for business growth"
result = self.run_sequential_swarm(task, agents)
if result:
print("✅ Data Analysis Pipeline completed successfully")
# Try different possible result keys
result_text = (
result.get('result') or
result.get('response') or
result.get('content') or
result.get('output') or
result.get('data') or
str(result)[:200]
)
print(f"📝 Result: {result_text[:200] if result_text else 'No result'}...")
else:
print("❌ Data Analysis Pipeline failed")
return result
def example_business_process_workflow(self):
"""Example: Business Process Workflow using Swarms API."""
print("\n💼 Example: Business Process Workflow")
print("-" * 50)
# Define agents for business process
agents = [
{
"name": "BusinessAnalyst",
"description": "Analyzes business requirements and processes",
"system_prompt": "You are a business analyst. Analyze business requirements, identify process improvements, and provide strategic recommendations."
},
{
"name": "ProcessDesigner",
"description": "Designs optimized business processes",
"system_prompt": "You are a process designer. Design optimized business processes based on analysis, considering efficiency, cost, and scalability."
},
{
"name": "ImplementationPlanner",
"description": "Plans implementation strategies",
"system_prompt": "You are an implementation planner. Create detailed implementation plans, timelines, and resource requirements for process changes."
}
]
task = "Analyze our current customer onboarding process and design an optimized workflow that reduces time-to-value while maintaining quality"
result = self.run_sequential_swarm(task, agents)
if result:
print("✅ Business Process Workflow completed successfully")
# Try different possible result keys
result_text = (
result.get('result') or
result.get('response') or
result.get('content') or
result.get('output') or
result.get('data') or
str(result)[:200]
)
print(f"📝 Result: {result_text[:200] if result_text else 'No result'}...")
else:
print("❌ Business Process Workflow failed")
return result
def example_concurrent_research(self):
"""Example: Concurrent Research using Swarms API."""
print("\n🔍 Example: Concurrent Research")
print("-" * 50)
# Define agents for concurrent research
agents = [
{
"name": "MarketResearcher",
"description": "Researches market trends and competition",
"system_prompt": "You are a market researcher. Research market trends, competitive landscape, and industry developments. Focus on actionable insights."
},
{
"name": "TechnologyAnalyst",
"description": "Analyzes technology trends and innovations",
"system_prompt": "You are a technology analyst. Research technology trends, innovations, and emerging technologies. Provide technical insights and predictions."
},
{
"name": "FinancialAnalyst",
"description": "Analyzes financial data and market performance",
"system_prompt": "You are a financial analyst. Analyze financial data, market performance, and economic indicators. Provide financial insights and forecasts."
}
]
task = "Research the current state of artificial intelligence in healthcare, including market size, key players, technological advances, and future opportunities"
result = self.run_concurrent_swarm(task, agents)
if result:
print("✅ Concurrent Research completed successfully")
# Try different possible result keys
result_text = (
result.get('result') or
result.get('response') or
result.get('content') or
result.get('output') or
result.get('data') or
str(result)[:200]
)
print(f"📝 Result: {result_text[:200] if result_text else 'No result'}...")
else:
print("❌ Concurrent Research failed")
return result
def run_all_examples(self):
"""Run all API examples."""
print("🚀 Starting Swarms API Examples")
print("=" * 60)
# Check API health first
print("\n🔍 Checking API Health...")
health = self.health_check()
if health:
print("✅ API is healthy")
else:
print("❌ API health check failed")
return
# Run examples
examples = [
self.example_software_development_pipeline,
self.example_data_analysis_pipeline,
self.example_business_process_workflow,
self.example_concurrent_research,
]
for example in examples:
try:
result = example()
if result:
self.results[example.__name__] = result
except Exception as e:
print(f"❌ Example {example.__name__} failed: {e}")
self.results[example.__name__] = {"error": str(e)}
# Generate summary
self.generate_summary()
return self.results
def generate_summary(self):
"""Generate a summary of all examples."""
print("\n" + "=" * 60)
print("📊 SWARMS API EXAMPLES SUMMARY")
print("=" * 60)
successful = sum(1 for result in self.results.values() if "error" not in result)
failed = len(self.results) - successful
print(f"Total Examples: {len(self.results)}")
print(f"✅ Successful: {successful}")
print(f"❌ Failed: {failed}")
print("\n📈 Results:")
print("-" * 60)
for name, result in self.results.items():
if "error" in result:
print(f"{name}: {result['error']}")
else:
print(f"{name}: Completed successfully")
# Save results to file
report_data = {
"summary": {
"total_examples": len(self.results),
"successful": successful,
"failed": failed,
"timestamp": datetime.now().isoformat()
},
"results": self.results
}
with open("swarms_api_examples_report.json", "w") as f:
json.dump(report_data, f, indent=2)
print(f"\n📄 Detailed report saved to: swarms_api_examples_report.json")
def main():
"""Main function to run all API examples."""
examples = SwarmsAPIExamples()
results = examples.run_all_examples()
return results
if __name__ == "__main__":
# Run API examples
main()
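# Standalone usage sketch (assumes a valid SWARMS_API_KEY; response shapes are not
# guaranteed, which is why the examples above probe several possible result keys):
#
#   examples = SwarmsAPIExamples()
#   examples.health_check()
#   examples.run_single_agent("Summarize the benefits of unit tests")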

File diff suppressed because it is too large.

@@ -0,0 +1,329 @@
"""
Simple GraphWorkflow Examples
Quick examples demonstrating basic GraphWorkflow functionality.
These examples are designed to be easy to run and understand.
"""
import asyncio
import os
import sys
# Add the parent directory to the path so we can import from swarms
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from swarms import Agent
from swarms.structs.graph_workflow import GraphWorkflow, Node, Edge, NodeType, EdgeType
# Check for API key in environment variables
if not os.getenv("OPENAI_API_KEY"):
print("⚠️ Warning: OPENAI_API_KEY environment variable not set.")
print(" Please set your API key: export OPENAI_API_KEY='your-api-key-here'")
print(" Or set it in your environment variables.")
async def example_1_basic_workflow():
"""Example 1: Basic workflow with two simple tasks."""
print("\n🔧 Example 1: Basic Workflow")
print("-" * 40)
# Create workflow
workflow = GraphWorkflow(name="Basic Example")
# Define simple functions
def task_1(**kwargs):
return {"message": "Hello from Task 1", "data": [1, 2, 3]}
def task_2(**kwargs):
message = kwargs.get('message', '')
data = kwargs.get('data', [])
return {"final_result": f"{message} - Processed {len(data)} items"}
# Create nodes
node1 = Node(
id="task_1",
type=NodeType.TASK,
callable=task_1,
output_keys=["message", "data"]
)
node2 = Node(
id="task_2",
type=NodeType.TASK,
callable=task_2,
required_inputs=["message", "data"],
output_keys=["final_result"]
)
# Add nodes and edges
workflow.add_node(node1)
workflow.add_node(node2)
workflow.add_edge(Edge(source="task_1", target="task_2"))
# Set entry and end points
workflow.set_entry_points(["task_1"])
workflow.set_end_points(["task_2"])
# Run workflow
result = await workflow.run("Basic workflow example")
print(f"Result: {result['context_data']['final_result']}")
return result
async def example_2_agent_workflow():
"""Example 2: Workflow with AI agents."""
print("\n🤖 Example 2: Agent Workflow")
print("-" * 40)
# Create agents with cheapest models
writer = Agent(
agent_name="Writer",
system_prompt="You are a creative writer. Write engaging content.",
model_name="gpt-3.5-turbo" # Cheaper model
)
editor = Agent(
agent_name="Editor",
system_prompt="You are an editor. Review and improve the content.",
model_name="gpt-3.5-turbo" # Cheaper model
)
# Create workflow
workflow = GraphWorkflow(name="Content Creation")
# Create nodes
writer_node = Node(
id="writer",
type=NodeType.AGENT,
agent=writer,
output_keys=["content"],
timeout=60.0
)
editor_node = Node(
id="editor",
type=NodeType.AGENT,
agent=editor,
required_inputs=["content"],
output_keys=["edited_content"],
timeout=60.0
)
# Add nodes and edges
workflow.add_node(writer_node)
workflow.add_node(editor_node)
workflow.add_edge(Edge(source="writer", target="editor"))
# Set entry and end points
workflow.set_entry_points(["writer"])
workflow.set_end_points(["editor"])
# Run workflow
result = await workflow.run("Write a short story about a robot learning to paint")
print(f"Content created: {result['context_data']['edited_content'][:100]}...")
return result
async def example_3_conditional_workflow():
"""Example 3: Workflow with conditional logic."""
print("\n🔀 Example 3: Conditional Workflow")
print("-" * 40)
# Create workflow
workflow = GraphWorkflow(name="Conditional Example")
# Define functions
def generate_number(**kwargs):
import random
number = random.randint(1, 100)
return {"number": number}
    def check_even(**kwargs):
        number = kwargs.get('number', 0)
        # Return a dict so the value lands under the declared output key "is_even",
        # matching the pattern used by the other task callables in this file.
        return {"is_even": number % 2 == 0}
def process_even(**kwargs):
number = kwargs.get('number', 0)
return {"result": f"Even number {number} processed"}
def process_odd(**kwargs):
number = kwargs.get('number', 0)
return {"result": f"Odd number {number} processed"}
# Create nodes - using TASK type for condition since CONDITION doesn't exist
nodes = [
Node(id="generate", type=NodeType.TASK, callable=generate_number, output_keys=["number"]),
Node(id="check", type=NodeType.TASK, callable=check_even, required_inputs=["number"], output_keys=["is_even"]),
Node(id="even_process", type=NodeType.TASK, callable=process_even, required_inputs=["number"], output_keys=["result"]),
Node(id="odd_process", type=NodeType.TASK, callable=process_odd, required_inputs=["number"], output_keys=["result"]),
]
# Add nodes
for node in nodes:
workflow.add_node(node)
# Add edges - simplified without conditional edges
workflow.add_edge(Edge(source="generate", target="check"))
workflow.add_edge(Edge(source="check", target="even_process"))
workflow.add_edge(Edge(source="check", target="odd_process"))
# Set entry and end points
workflow.set_entry_points(["generate"])
workflow.set_end_points(["even_process", "odd_process"])
# Run workflow
result = await workflow.run("Process a random number")
print(f"Result: {result['context_data'].get('result', 'No result')}")
return result
async def example_4_data_processing():
"""Example 4: Data processing workflow."""
print("\n📊 Example 4: Data Processing")
print("-" * 40)
# Create workflow
workflow = GraphWorkflow(name="Data Processing")
# Define data processing functions
def create_data(**kwargs):
return {"raw_data": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
def filter_data(**kwargs):
data = kwargs.get('raw_data', [])
filtered = [x for x in data if x % 2 == 0]
return {"filtered_data": filtered}
def calculate_stats(**kwargs):
data = kwargs.get('filtered_data', [])
return {
"stats": {
"count": len(data),
"sum": sum(data),
"average": sum(data) / len(data) if data else 0
}
}
# Create nodes - using TASK type instead of DATA_PROCESSOR
nodes = [
Node(id="create", type=NodeType.TASK, callable=create_data, output_keys=["raw_data"]),
Node(id="filter", type=NodeType.TASK, callable=filter_data, required_inputs=["raw_data"], output_keys=["filtered_data"]),
Node(id="stats", type=NodeType.TASK, callable=calculate_stats, required_inputs=["filtered_data"], output_keys=["stats"]),
]
# Add nodes
for node in nodes:
workflow.add_node(node)
# Add edges
workflow.add_edge(Edge(source="create", target="filter"))
workflow.add_edge(Edge(source="filter", target="stats"))
# Set entry and end points
workflow.set_entry_points(["create"])
workflow.set_end_points(["stats"])
# Run workflow
result = await workflow.run("Process and analyze data")
print(f"Statistics: {result['context_data']['stats']}")
return result
async def example_5_parallel_execution():
"""Example 5: Parallel execution workflow."""
print("\n⚡ Example 5: Parallel Execution")
print("-" * 40)
# Create workflow
workflow = GraphWorkflow(name="Parallel Example")
# Define parallel tasks
def task_a(**kwargs):
import time
time.sleep(0.1) # Simulate work
return {"result_a": "Task A completed"}
def task_b(**kwargs):
import time
time.sleep(0.1) # Simulate work
return {"result_b": "Task B completed"}
def task_c(**kwargs):
import time
time.sleep(0.1) # Simulate work
return {"result_c": "Task C completed"}
def merge_results(**kwargs):
results = []
for key in ['result_a', 'result_b', 'result_c']:
if key in kwargs:
results.append(kwargs[key])
return {"merged": results}
# Create nodes - using TASK type instead of MERGE
nodes = [
Node(id="task_a", type=NodeType.TASK, callable=task_a, output_keys=["result_a"], parallel=True),
Node(id="task_b", type=NodeType.TASK, callable=task_b, output_keys=["result_b"], parallel=True),
Node(id="task_c", type=NodeType.TASK, callable=task_c, output_keys=["result_c"], parallel=True),
Node(id="merge", type=NodeType.TASK, callable=merge_results, required_inputs=["result_a", "result_b", "result_c"], output_keys=["merged"]),
]
# Add nodes
for node in nodes:
workflow.add_node(node)
# Add edges (all parallel tasks feed into merge)
workflow.add_edge(Edge(source="task_a", target="merge"))
workflow.add_edge(Edge(source="task_b", target="merge"))
workflow.add_edge(Edge(source="task_c", target="merge"))
# Set entry and end points
workflow.set_entry_points(["task_a", "task_b", "task_c"])
workflow.set_end_points(["merge"])
# Run workflow
result = await workflow.run("Execute parallel tasks")
print(f"Merged results: {result['context_data']['merged']}")
return result
async def run_all_examples():
"""Run all simple examples."""
print("🚀 Running GraphWorkflow Simple Examples")
print("=" * 50)
examples = [
example_1_basic_workflow,
example_2_agent_workflow,
example_3_conditional_workflow,
example_4_data_processing,
example_5_parallel_execution,
]
results = {}
for i, example in enumerate(examples, 1):
try:
print(f"\n📝 Running Example {i}...")
result = await example()
results[f"example_{i}"] = result
print(f"✅ Example {i} completed successfully")
except Exception as e:
print(f"❌ Example {i} failed: {e}")
results[f"example_{i}"] = {"error": str(e)}
print("\n" + "=" * 50)
print("🎉 All examples completed!")
print(f"✅ Successful: {sum(1 for r in results.values() if 'error' not in r)}")
print(f"❌ Failed: {sum(1 for r in results.values() if 'error' in r)}")
return results
if __name__ == "__main__":
# Run all examples
asyncio.run(run_all_examples())
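# To try a single example instead of the whole suite (sketch):
#
#   asyncio.run(example_2_agent_workflow())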