pull/1078/head
parent
92c857e3b9
commit
fc72304491
@ -0,0 +1,41 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.swarm_router import SwarmRouter
|
||||
|
||||
def _make_etf_agent(name: str) -> Agent:
    """Return an ETF research agent configured with the shared demo settings."""
    return Agent(
        agent_name=name,
        agent_description=(
            "Specialized agent for researching, analyzing, and recommending "
            "Exchange-Traded Funds (ETFs) across various sectors and markets."
        ),
        model_name="groq/moonshotai/kimi-k2-instruct",
        dynamic_temperature_enabled=True,
        max_loops=1,
        dynamic_context_window=True,
    )


# Two identically-configured ETF research agents.
agent = _make_etf_agent("ETF-Research-Agent")
agent_two = _make_etf_agent("ETF-Research-Agent-2")

# Route both agents through a batched grid workflow; output comes back as a dict.
workflow = SwarmRouter(
    agents=[agent, agent_two],
    swarm_type="BatchedGridWorkflow",
    output_type="dict",
)

# One research question per grid slot.
tasks = [
    "What are the best GOLD ETFs?",
    "What are the best american energy ETFs?",
]

# Execute all tasks and show the aggregated result.
result = workflow.run(tasks=tasks)

print(result)
|
@ -0,0 +1,56 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Basic Hierarchical Swarm Streaming Demo
|
||||
|
||||
Minimal example showing the core streaming callback functionality.
|
||||
"""
|
||||
|
||||
from swarms.structs.hiearchical_swarm import HierarchicalSwarm
|
||||
from swarms.agents import Agent
|
||||
|
||||
|
||||
def simple_callback(agent_name: str, chunk: str, is_final: bool):
    """Report streaming progress for one agent.

    Prints a completion line when *is_final* is set, otherwise echoes the
    streamed *chunk*. Whitespace-only chunks produce no output.
    """
    # Guard clause: ignore empty / whitespace-only chunks entirely.
    if not chunk.strip():
        return
    message = (
        f"✅ {agent_name} finished"
        if is_final
        else f"🔄 {agent_name}: {chunk}"
    )
    print(message)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    print("🎯 BASIC HIERARCHICAL SWARM STREAMING")

    # A single worker agent is enough for the minimal demo.
    demo_agent = Agent(
        agent_name="Simple_Agent",
        agent_description="A simple agent for demonstration",
        system_prompt="You are a helpful assistant.",
        model_name="gpt-4o-mini",
        max_loops=1,
    )

    # Hierarchical swarm: one worker plus a director model.
    demo_swarm = HierarchicalSwarm(
        name="Basic_Swarm",
        description="Basic streaming demo",
        agents=[demo_agent],
        max_loops=1,
        director_model_name="gpt-4o-mini",
    )

    task = "Explain what artificial intelligence is in simple terms."
    print(f"Task: {task}")
    print("\nExecuting with streaming callback:\n")

    # simple_callback receives chunks live while the swarm executes.
    result = demo_swarm.run(
        task=task,
        streaming_callback=simple_callback,
    )

    print("\n" + "=" * 30)
    print("Final result:")
    print(result)
|
@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Hierarchical Swarm Batch Processing Demo
|
||||
|
||||
This demo shows how to use streaming callbacks with batch processing
|
||||
to handle multiple tasks sequentially with real-time feedback.
|
||||
"""
|
||||
|
||||
import time
|
||||
from typing import Callable
|
||||
from swarms.structs.hiearchical_swarm import HierarchicalSwarm
|
||||
from swarms.agents import Agent
|
||||
|
||||
|
||||
def create_batch_callback() -> Callable[[str, str, bool], None]:
    """Build a streaming callback with terse output suited to batch runs."""

    def batch_callback(agent_name: str, chunk: str, is_final: bool):
        # Whitespace-only chunks produce no output.
        if not chunk.strip():
            return
        timestamp = time.strftime("%H:%M:%S")
        if is_final:
            print(f"\n✅ [{timestamp}] {agent_name} COMPLETED")
            return
        # Truncate long chunks so batch logs stay readable.
        preview = f"{chunk[:30]}..." if len(chunk) > 30 else chunk
        print(f"🔄 {agent_name}: {preview}")

    return batch_callback
|
||||
|
||||
|
||||
def create_agents():
    """Create specialized agents for the swarm."""
    # (name, description, system prompt) for each specialist role.
    roles = [
        (
            "Research_Agent",
            "Specialized in gathering and analyzing information",
            "You are a research specialist. Provide detailed, accurate information on any topic.",
        ),
        (
            "Analysis_Agent",
            "Expert at analyzing data and drawing insights",
            "You are an analysis expert. Break down complex information and provide clear insights.",
        ),
    ]
    # All demo agents share the same model and single-pass loop setting.
    return [
        Agent(
            agent_name=name,
            agent_description=description,
            system_prompt=prompt,
            model_name="gpt-4o-mini",
            max_loops=1,
        )
        for name, description, prompt in roles
    ]
|
||||
|
||||
|
||||
if __name__ == "__main__":
    banner = "=" * 50
    print("📦 HIERARCHICAL SWARM BATCH PROCESSING DEMO")
    print(banner)

    # Assemble the swarm; verbose=False keeps the batch output readable.
    batch_swarm = HierarchicalSwarm(
        name="Batch_Processing_Swarm",
        description="Swarm for batch processing multiple tasks",
        agents=create_agents(),
        max_loops=1,
        verbose=False,
        director_model_name="gpt-4o-mini",
    )

    # Three independent questions, processed sequentially by batched_run.
    tasks = [
        "What are the environmental benefits of solar energy?",
        "How does wind power contribute to sustainable development?",
        "What are the economic advantages of hydroelectric power?",
    ]

    print(f"Processing {len(tasks)} tasks:")
    for index, task in enumerate(tasks, 1):
        print(f" {index}. {task}")
    print()

    # Terse per-chunk progress reporter.
    streaming_callback = create_batch_callback()

    print("🎬 EXECUTING BATCH WITH STREAMING CALLBACKS...")
    print("Each task will show real-time progress:\n")

    results = batch_swarm.batched_run(
        tasks=tasks,
        streaming_callback=streaming_callback,
    )

    print("\n" + banner)
    print("🎉 BATCH PROCESSING COMPLETED!")
    print(f"Processed {len(results)} tasks")

    # Per-task outcome summary (truthy result == completed).
    print("\n📊 SUMMARY:")
    for index, outcome in enumerate(results, 1):
        print(f" Task {index}: {'Completed' if outcome else 'Failed'}")
|
@ -0,0 +1,124 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Hierarchical Swarm Comparison Demo
|
||||
|
||||
This demo compares traditional swarm execution (without streaming)
|
||||
versus streaming execution to show the difference in behavior.
|
||||
"""
|
||||
|
||||
from swarms.structs.hiearchical_swarm import HierarchicalSwarm
|
||||
from swarms.agents import Agent
|
||||
|
||||
|
||||
def create_agents():
    """Create specialized agents for the swarm."""
    # (name, description, system prompt) for each specialist role.
    roles = [
        (
            "Research_Agent",
            "Specialized in gathering and analyzing information",
            "You are a research specialist. Provide detailed, accurate information on any topic.",
        ),
        (
            "Analysis_Agent",
            "Expert at analyzing data and drawing insights",
            "You are an analysis expert. Break down complex information and provide clear insights.",
        ),
        (
            "Summary_Agent",
            "Skilled at creating concise summaries",
            "You are a summarization expert. Create clear, concise summaries of complex topics.",
        ),
    ]
    # All demo agents share the same model and single-pass loop setting.
    return [
        Agent(
            agent_name=name,
            agent_description=description,
            system_prompt=prompt,
            model_name="gpt-4o-mini",
            max_loops=1,
        )
        for name, description, prompt in roles
    ]
|
||||
|
||||
|
||||
def run_traditional_swarm():
    """Run the swarm once without streaming callbacks and print the result.

    Demonstrates the baseline (blocking) execution path: nothing is shown
    until the whole swarm run has finished.
    """
    print("🔇 TRADITIONAL SWARM EXECUTION (No Streaming)")
    print("-" * 50)

    agents = create_agents()
    swarm = HierarchicalSwarm(
        name="Traditional_Swarm",
        description="Traditional swarm execution",
        agents=agents,
        max_loops=1,
        verbose=False,
        director_model_name="gpt-4o-mini",
    )

    task = "What are the main benefits of renewable energy sources?"

    print(f"Task: {task}")

    result = swarm.run(task=task)

    print("\nResult:")
    if isinstance(result, dict):
        for key, value in result.items():
            # BUGFIX: coerce to str before slicing — the original sliced
            # `value[:200]` while length-checking len(str(value)), which
            # raises TypeError for non-sliceable values (dicts, ints, ...).
            text = str(value)
            print(f"{key}: {text[:200]}..." if len(text) > 200 else f"{key}: {text}")
    else:
        # Same fix for the non-dict branch: slice the string form.
        text = str(result)
        print(text[:500] + "..." if len(text) > 500 else text)
|
||||
|
||||
|
||||
def run_streaming_swarm():
    """Run the swarm with a streaming callback and print the result.

    The callback echoes chunks as they arrive, so progress is visible
    while the swarm is still working.
    """
    # NOTE: the original imported `time` and `typing.Callable` here but
    # never used either; both removed.

    def simple_callback(agent_name: str, chunk: str, is_final: bool):
        """Print a short progress line for each non-empty streamed chunk."""
        if not chunk.strip():
            return
        if is_final:
            print(f"\n✅ {agent_name} completed")
        else:
            # Truncate long chunks to keep the live output compact.
            preview = f"{chunk[:50]}..." if len(chunk) > 50 else chunk
            print(f"🔄 {agent_name}: {preview}")

    print("\n🎯 STREAMING SWARM EXECUTION")
    print("-" * 50)

    agents = create_agents()
    swarm = HierarchicalSwarm(
        name="Streaming_Swarm",
        description="Swarm with streaming callbacks",
        agents=agents,
        max_loops=1,
        verbose=False,
        director_model_name="gpt-4o-mini",
    )

    task = "What are the main benefits of renewable energy sources?"

    print(f"Task: {task}")

    result = swarm.run(
        task=task,
        streaming_callback=simple_callback,
    )

    print("\nResult:")
    if isinstance(result, dict):
        for key, value in result.items():
            # BUGFIX: coerce to str before slicing — the original sliced
            # `value[:200]` while length-checking len(str(value)), which
            # raises TypeError for non-sliceable values (dicts, ints, ...).
            text = str(value)
            print(f"{key}: {text[:200]}..." if len(text) > 200 else f"{key}: {text}")
    else:
        # Same fix for the non-dict branch: slice the string form.
        text = str(result)
        print(text[:500] + "..." if len(text) > 500 else text)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Demo entry point: run both execution modes back to back.
    banner = "=" * 50
    print("🔄 HIERARCHICAL SWARM COMPARISON DEMO")
    print(banner)
    print("Comparing traditional vs streaming execution\n")

    # Baseline first, then the streaming variant for contrast.
    run_traditional_swarm()
    run_streaming_swarm()

    print("\n" + banner)
    print("✨ Comparison complete!")
    print("Notice how streaming shows progress in real-time")
|
@ -0,0 +1,232 @@
|
||||
"""
|
||||
Hierarchical Swarm Live Paragraph Streaming Example
|
||||
|
||||
This example demonstrates how to use the streaming callback feature
|
||||
in the HierarchicalSwarm to see live paragraph formation during agent execution.
|
||||
|
||||
The streaming callback allows you to:
|
||||
- Watch paragraphs build in real-time as tokens accumulate
|
||||
- See the complete text forming word by word
|
||||
- Track multiple agents working simultaneously
|
||||
- View completed paragraphs with timestamps
|
||||
- Monitor the entire generation process live
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
from swarms.structs.agent import Agent
|
||||
from swarms.structs.hiearchical_swarm import HierarchicalSwarm
|
||||
|
||||
|
||||
def streaming_callback(agent_name: str, chunk: str, is_final: bool):
    """
    Example streaming callback function that shows live paragraph formation.

    This function is called whenever an agent produces output during streaming.
    It shows tokens accumulating in real-time to form complete paragraphs.

    Args:
        agent_name (str): The name of the agent producing the output
        chunk (str): The chunk of output (empty string if is_final=True)
        is_final (bool): True when the agent has completed its task
    """
    # Timestamp captured once per call; all lines printed during this call
    # share it.
    timestamp = time.strftime("%H:%M:%S")

    # Store accumulated text for each agent to track paragraph formation.
    # NOTE(review): state lives on the function object itself, so it persists
    # across ALL swarm runs in the process — buffers are never reset between
    # runs.
    if not hasattr(streaming_callback, 'agent_buffers'):
        streaming_callback.agent_buffers = {}
        streaming_callback.paragraph_count = {}

    # Initialize buffers for new agents (first chunk also prints a banner).
    if agent_name not in streaming_callback.agent_buffers:
        streaming_callback.agent_buffers[agent_name] = ""
        # Counter is 1-based; the summary below subtracts 1 to report
        # completed paragraphs.
        streaming_callback.paragraph_count[agent_name] = 1
        print(f"\n🎬 [{timestamp}] {agent_name} starting...")
        print("="*60)

    if chunk.strip():
        # Split chunk into tokens (words/punctuation). Padding '\n' with
        # spaces makes each newline come out of split() as its own token.
        tokens = chunk.replace('\n', ' \n ').split()

        for token in tokens:
            # Handle paragraph breaks
            if token == '\n':
                # Only flush non-empty buffers, so blank lines don't emit
                # empty "paragraphs".
                if streaming_callback.agent_buffers[agent_name].strip():
                    print(f"\n📄 [{timestamp}] {agent_name} - Paragraph {streaming_callback.paragraph_count[agent_name]} Complete:")
                    print(f"{streaming_callback.agent_buffers[agent_name].strip()}")
                    print("="*60)
                    streaming_callback.paragraph_count[agent_name] += 1
                    streaming_callback.agent_buffers[agent_name] = ""
            else:
                # Add token to buffer and show live accumulation
                streaming_callback.agent_buffers[agent_name] += token + " "

                # Clear line and show current paragraph: '\r' rewinds to the
                # start of the line so the in-progress paragraph redraws in
                # place on each token.
                print(f"\r[{timestamp}] {agent_name} | {streaming_callback.agent_buffers[agent_name].strip()}", end="", flush=True)

    if is_final:
        print()  # New line after live updates
        # Print any remaining content as final paragraph
        if streaming_callback.agent_buffers[agent_name].strip():
            print(f"\n✅ [{timestamp}] {agent_name} COMPLETED - Final Paragraph:")
            print(f"{streaming_callback.agent_buffers[agent_name].strip()}")
            print()

        # Summary: count is 1-based, hence the -1. The trailing final
        # paragraph (if any) is not included in this tally.
        print(f"🎯 [{timestamp}] {agent_name} finished processing")
        print(f"📊 Total paragraphs processed: {streaming_callback.paragraph_count[agent_name] - 1}")
        print("="*60)
|
||||
|
||||
|
||||
def create_sample_agents():
    """Create sample agents for the hierarchical swarm."""
    # (name, description, system prompt) per role: marketing, content, analysis.
    roles = [
        (
            "MarketingStrategist",
            "Expert in marketing strategy and campaign planning",
            "You are a marketing strategist. Provide creative and effective marketing strategies.",
        ),
        (
            "ContentCreator",
            "Expert in creating engaging content",
            "You are a content creator. Create engaging, well-written content for various platforms.",
        ),
        (
            "DataAnalyst",
            "Expert in data analysis and insights",
            "You are a data analyst. Provide detailed analysis and insights from data.",
        ),
    ]
    # Every sample agent shares the same model and single-pass loop setting.
    return [
        Agent(
            agent_name=name,
            agent_description=description,
            system_prompt=prompt,
            model_name="gpt-4o-mini",
            max_loops=1,
        )
        for name, description, prompt in roles
    ]
|
||||
|
||||
|
||||
def main():
    """Main function demonstrating hierarchical swarm with streaming."""
    print("🚀 Hierarchical Swarm Streaming Example")
    print("=" * 60)

    # Build the marketing-campaign swarm from the three sample agents.
    campaign_swarm = HierarchicalSwarm(
        name="MarketingCampaignSwarm",
        description="A swarm for planning and executing marketing campaigns",
        agents=create_sample_agents(),
        director_model_name="gpt-4o-mini",
        max_loops=1,
        verbose=True,
    )

    task = """
    Plan and execute a comprehensive marketing campaign for a new tech startup called 'CodeFlow'
    that develops AI-powered code generation tools. The campaign should include:

    1. Target audience analysis
    2. Content strategy development
    3. Social media campaign plan
    4. Performance metrics and KPIs

    Create a detailed campaign plan with specific tactics, timelines, and budget considerations.
    """

    print(f"📋 Task: {task.strip()}")
    print("\n🎯 Starting hierarchical swarm with live paragraph streaming...")
    print("Watch as agents build complete paragraphs in real-time!\n")
    print("Each token accumulates to form readable text, showing the full paragraph as it builds.\n")

    # Run the swarm; the module-level streaming_callback renders live output.
    try:
        result = campaign_swarm.run(
            task=task,
            streaming_callback=streaming_callback,
        )
    except Exception as e:
        # Surface any failure without a traceback, then bail out.
        print(f"❌ Error running swarm: {str(e)}")
    else:
        print("\n🎉 Swarm execution completed!")
        print("\n📊 Final Results:")
        print("-" * 30)
        print(result)
|
||||
|
||||
|
||||
def simple_callback_example():
    """Simpler example with token-by-token streaming."""
    print("\n🔧 Simple Token-by-Token Callback Example")
    print("=" * 50)

    # Closure state: running text and token tally, keyed by agent name.
    buffer = {}
    token_count = {}

    def simple_callback(agent_name: str, chunk: str, is_final: bool):
        """Simple callback that shows live paragraph formation."""
        # Lazily initialize state the first time an agent speaks.
        if agent_name not in buffer:
            buffer[agent_name] = ""
            token_count[agent_name] = 0

        if chunk.strip():
            # Pad newlines with spaces so split() yields them as tokens;
            # empty tokens are skipped.
            for token in chunk.replace('\n', ' \n ').split():
                if token.strip():
                    token_count[agent_name] += 1
                    buffer[agent_name] += token + " "
                    # '\r' redraws the accumulating text on one terminal line.
                    print(f"\r{agent_name} | {buffer[agent_name].strip()}", end="", flush=True)

        if is_final:
            print()  # terminate the live-updating line
            print(f"✓ {agent_name} finished! Total tokens: {token_count[agent_name]}")
            print(f"Final text: {buffer[agent_name].strip()}")
            print("-" * 40)

    # Two lightweight agents for the demo.
    agents = [
        Agent(
            agent_name="Researcher",
            agent_description="Research specialist",
            system_prompt="You are a researcher. Provide thorough research on given topics.",
            model_name="gpt-4o-mini",
            max_loops=1,
        ),
        Agent(
            agent_name="Writer",
            agent_description="Content writer",
            system_prompt="You are a writer. Create clear, concise content.",
            model_name="gpt-4o-mini",
            max_loops=1,
        ),
    ]

    swarm = HierarchicalSwarm(
        name="SimpleSwarm",
        description="Simple swarm example",
        agents=agents,
        director_model_name="gpt-4o-mini",
        max_loops=1,
    )

    task = "Research the benefits of renewable energy and write a summary article."

    print(f"Task: {task}")
    result = swarm.run(task=task, streaming_callback=simple_callback)
    print(f"\nResult: {result}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Entry point: run the full live-paragraph streaming demo.
    main()

    # Swap in the lighter token-by-token demo by uncommenting:
    # simple_callback_example()
|
@ -0,0 +1,135 @@
|
||||
import time
|
||||
from typing import Callable
|
||||
from swarms.structs.hiearchical_swarm import HierarchicalSwarm
|
||||
from swarms import Agent
|
||||
|
||||
|
||||
def create_streaming_callback() -> Callable[[str, str, bool], None]:
    """Build a callback that renders live paragraph formation per agent."""

    # Per-agent closure state: running text and a 1-based paragraph counter.
    text_so_far = {}
    para_number = {}

    def streaming_callback(agent_name: str, chunk: str, is_final: bool):
        stamp = time.strftime("%H:%M:%S")

        # First chunk from this agent: set up its state and announce it.
        if agent_name not in text_so_far:
            text_so_far[agent_name] = ""
            para_number[agent_name] = 1
            print(f"\n🎬 [{stamp}] {agent_name} starting...")
            print("=" * 60)

        if chunk.strip():
            # Pad newlines with spaces so split() yields them as tokens.
            for token in chunk.replace('\n', ' \n ').split():
                if token == '\n':
                    # A newline token closes the current paragraph, if any.
                    if text_so_far[agent_name].strip():
                        print(f"\n📄 [{stamp}] {agent_name} - Paragraph {para_number[agent_name]} Complete:")
                        print(f"{text_so_far[agent_name].strip()}")
                        print("=" * 60)
                        para_number[agent_name] += 1
                        text_so_far[agent_name] = ""
                else:
                    text_so_far[agent_name] += token + " "
                    # '\r' redraws the in-progress paragraph on one line.
                    print(f"\r[{stamp}] {agent_name} | {text_so_far[agent_name].strip()}", end="", flush=True)

        if is_final:
            print()  # move past the live-updating line
            # Flush whatever text remains as the final paragraph.
            if text_so_far[agent_name].strip():
                print(f"\n✅ [{stamp}] {agent_name} COMPLETED - Final Paragraph:")
                print(f"{text_so_far[agent_name].strip()}")
                print()

            # Counter is 1-based, hence the -1 in the summary line.
            print(f"🎯 [{stamp}] {agent_name} finished processing")
            print(f"📊 Total paragraphs processed: {para_number[agent_name] - 1}")
            print("=" * 60)

    return streaming_callback
|
||||
|
||||
|
||||
def create_agents():
    """Create specialized agents for the swarm."""
    # (name, description, system prompt) per specialist; all stream tokens.
    roles = [
        (
            "Research_Agent",
            "Specialized in gathering and analyzing information",
            "You are a research specialist. Provide detailed, accurate information on any topic.",
        ),
        (
            "Analysis_Agent",
            "Expert at analyzing data and drawing insights",
            "You are an analysis expert. Break down complex information and provide clear insights.",
        ),
        (
            "Summary_Agent",
            "Skilled at creating concise summaries",
            "You are a summarization expert. Create clear, concise summaries of complex topics.",
        ),
    ]
    return [
        Agent(
            agent_name=name,
            agent_description=description,
            system_prompt=prompt,
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,  # emit chunks so the streaming callback fires
        )
        for name, description, prompt in roles
    ]
|
||||
|
||||
|
||||
if __name__ == "__main__":
    print("🎯 HIERARCHICAL SWARM STREAMING DEMO")
    print("=" * 50)

    # Build the swarm from the three streaming-enabled specialist agents.
    demo_swarm = HierarchicalSwarm(
        name="Research_and_Analysis_Swarm",
        description="A swarm that researches topics, analyzes information, and creates summaries",
        agents=create_agents(),
        max_loops=1,
        verbose=True,
        director_model_name="gpt-4o-mini",
    )

    task = """
    Research the impact of artificial intelligence on the job market in 2024.
    Analyze how different industries are being affected and provide insights
    on future trends. Create a comprehensive summary of your findings.
    """

    print(f"Task: {task.strip()}")

    # Live-paragraph renderer shared by every agent in the run.
    streaming_callback = create_streaming_callback()

    print("\n🎬 EXECUTING WITH STREAMING CALLBACKS...")
    print("Watch real-time agent outputs below:\n")

    result = demo_swarm.run(
        task=task,
        streaming_callback=streaming_callback,
    )

    print("\n🎉 EXECUTION COMPLETED!")
    print("\n📊 FINAL RESULT:")
    print("-" * 50)

    # Dict results are printed key by key; anything else verbatim.
    if isinstance(result, dict):
        for key, value in result.items():
            print(f"\n{key}:")
            print(f"{value}")
    else:
        print(result)
|
Loading…
Reference in new issue