parent da7f5194a3
commit 3f1ed3d043
@@ -0,0 +1,89 @@
import asyncio
import json

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

from swarms.structs.aop import AOPCluster
from swarms.tools.mcp_client_tools import execute_tool_call_simple


async def discover_agents_example():
    """
    Discover all agents using the AOPCluster and print the result.
    """
    aop_cluster = AOPCluster(
        urls=["http://localhost:5932/mcp"],
        transport="streamable-http",
    )
    tool = aop_cluster.find_tool_by_server_name("discover_agents")
    if not tool:
        print("discover_agents tool not found.")
        return None

    tool_call_request = {
        "type": "function",
        "function": {
            "name": "discover_agents",
            "arguments": "{}",
        },
    }

    result = await execute_tool_call_simple(
        response=tool_call_request,
        server_path="http://localhost:5932/mcp",
        output_type="dict",
        verbose=False,
    )
    print(json.dumps(result, indent=2))
    return result


async def raw_mcp_discover_agents_example():
    """
    Call the MCP server directly using the raw MCP client to execute the
    built-in "discover_agents" tool and print the JSON result.

    This demonstrates how to:
    - Initialize an MCP client over streamable HTTP
    - List available tools (optional)
    - Call a specific tool by name with arguments
    """
    url = "http://localhost:5932/mcp"

    # Open a raw MCP client connection
    async with streamablehttp_client(url, timeout=10) as ctx:
        if len(ctx) == 2:
            read, write = ctx
        else:
            read, write, *_ = ctx

        async with ClientSession(read, write) as session:
            # Initialize the MCP session and optionally inspect tools
            await session.initialize()

            # Optional: list tools (uncomment to print)
            # tools = await session.list_tools()
            # print(json.dumps(tools.model_dump(), indent=2))

            # Call the built-in discovery tool with empty arguments
            result = await session.call_tool(
                name="discover_agents",
                arguments={},
            )

            # Convert to dict for pretty printing
            print(json.dumps(result.model_dump(), indent=2))
            return result.model_dump()


def main():
    """
    Run the helper-based and raw MCP client discovery examples.
    """
    # asyncio.run(discover_agents_example())
    asyncio.run(raw_mcp_discover_agents_example())


if __name__ == "__main__":
    main()
@@ -0,0 +1,69 @@
import asyncio
import json

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client


async def call_agent_tool_raw(
    url: str,
    tool_name: str,
    task: str,
    img: str | None = None,
    imgs: list[str] | None = None,
    correct_answer: str | None = None,
) -> dict:
    async with streamablehttp_client(url, timeout=30) as ctx:
        if len(ctx) == 2:
            read, write = ctx
        else:
            read, write, *_ = ctx

        async with ClientSession(read, write) as session:
            await session.initialize()
            arguments = {"task": task}
            if img is not None:
                arguments["img"] = img
            if imgs is not None:
                arguments["imgs"] = imgs
            if correct_answer is not None:
                arguments["correct_answer"] = correct_answer
            result = await session.call_tool(
                name=tool_name, arguments=arguments
            )
            return result.model_dump()


async def list_available_tools(url: str) -> dict:
    async with streamablehttp_client(url, timeout=30) as ctx:
        if len(ctx) == 2:
            read, write = ctx
        else:
            read, write, *_ = ctx

        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            return tools.model_dump()


def main():
    url = "http://localhost:5932/mcp"
    tool_name = "Research-Agent"
    task = "What are the latest experimental drug trials coming up in the next 6 months?"

    tools_info = asyncio.run(list_available_tools(url))
    print("Available tools:")
    print(json.dumps(tools_info, indent=2))

    # Step 2: Call the agent

    print(f"\nCalling tool '{tool_name}' with task...\n")
    result = asyncio.run(
        call_agent_tool_raw(url=url, tool_name=tool_name, task=task)
    )
    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()
@@ -0,0 +1,19 @@
from swarms.structs.self_moa_seq import SelfMoASeq

# Initialize
moa_seq = SelfMoASeq(
    model_name="anthropic/claude-haiku-4-5-20251001",
    temperature=0.7,
    window_size=6,
    verbose=True,
    num_samples=4,
    top_p=None,
)

task = (
    "Describe an effective treatment plan for a patient with a broken rib. "
    "Include immediate care, pain management, expected recovery timeline, and potential complications to watch for."
)

result = moa_seq.run(task)
print(result)
@@ -0,0 +1,86 @@
from typing import List, Dict, Any
from swarms import Agent
from swarms.structs.social_algorithms import SocialAlgorithms


def peer_review_algorithm(
    agents: List[Agent], task: str, **kwargs
) -> Dict[str, Any]:
    """
    A peer review social algorithm where agents review each other's work.

    Args:
        agents: List of agents participating in the algorithm
        task: The task to be processed
        **kwargs: Additional keyword arguments

    Returns:
        Dict containing the results from each agent and their reviews
    """
    if len(agents) < 2:
        raise ValueError("This algorithm requires at least 2 agents")

    results = {}
    reviews = {}

    # Each agent works on the task independently
    for i, agent in enumerate(agents):
        agent_prompt = f"Work on the following task: {task}"
        result = agent.run(agent_prompt)
        results[f"agent_{i}_{agent.agent_name}"] = result

    # Each agent reviews another agent's work (circular review)
    for i, agent in enumerate(agents):
        reviewer_index = (i + 1) % len(agents)
        reviewed_agent = agents[reviewer_index]

        review_prompt = f"Review the following work by {reviewed_agent.agent_name}:\n\n{results[f'agent_{reviewer_index}_{reviewed_agent.agent_name}']}\n\nProvide constructive feedback and suggestions for improvement."
        review = agent.run(review_prompt)
        reviews[
            f"{agent.agent_name}_reviews_{reviewed_agent.agent_name}"
        ] = review

    return {
        "original_work": results,
        "peer_reviews": reviews,
        "task": task,
    }


# Create agents
researcher = Agent(
    agent_name="Researcher",
    system_prompt="You are a research specialist focused on gathering comprehensive information.",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
)

analyst = Agent(
    agent_name="Analyst",
    system_prompt="You are an analytical specialist focused on interpreting and analyzing data.",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
)

reviewer = Agent(
    agent_name="Reviewer",
    system_prompt="You are a quality reviewer focused on providing constructive feedback.",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
)

# Create and run the social algorithm
social_alg = SocialAlgorithms(
    name="Peer-Review",
    description="Peer review workflow where agents review each other's work",
    agents=[researcher, analyst, reviewer],
    social_algorithm=peer_review_algorithm,
    verbose=False,
)

result = social_alg.run("What are the best gold ETFs?")

print(result)
@@ -0,0 +1,108 @@
from swarms import Agent, AOP

# Create specialized agents
research_agent = Agent(
    agent_name="Research-Agent",
    agent_description="Expert in research, data collection, and information gathering",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
    dynamic_temperature_enabled=True,
    system_prompt="""You are a research specialist. Your role is to:
    1. Gather comprehensive information on any given topic
    2. Analyze data from multiple sources
    3. Provide well-structured research findings
    4. Cite sources and maintain accuracy
    5. Present findings in a clear, organized manner

    Always provide detailed, factual information with proper context.""",
)

analysis_agent = Agent(
    agent_name="Analysis-Agent",
    agent_description="Expert in data analysis, pattern recognition, and generating insights",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
    dynamic_temperature_enabled=True,
    system_prompt="""You are an analysis specialist. Your role is to:
    1. Analyze data and identify patterns
    2. Generate actionable insights
    3. Create visualizations and summaries
    4. Provide statistical analysis
    5. Make data-driven recommendations

    Focus on extracting meaningful insights from information.""",
)

writing_agent = Agent(
    agent_name="Writing-Agent",
    agent_description="Expert in content creation, editing, and communication",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
    dynamic_temperature_enabled=True,
    system_prompt="""You are a writing specialist. Your role is to:
    1. Create engaging, well-structured content
    2. Edit and improve existing text
    3. Adapt tone and style for different audiences
    4. Ensure clarity and coherence
    5. Follow best practices in writing

    Always produce high-quality, professional content.""",
)

code_agent = Agent(
    agent_name="Code-Agent",
    agent_description="Expert in programming, code review, and software development",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
    dynamic_temperature_enabled=True,
    system_prompt="""You are a coding specialist. Your role is to:
    1. Write clean, efficient code
    2. Debug and fix issues
    3. Review and optimize code
    4. Explain programming concepts
    5. Follow best practices and standards

    Always provide working, well-documented code.""",
)

financial_agent = Agent(
    agent_name="Financial-Agent",
    agent_description="Expert in financial analysis, market research, and investment insights",
    model_name="anthropic/claude-haiku-4-5-20251001",
    max_loops=1,
    top_p=None,
    dynamic_temperature_enabled=True,
    system_prompt="""You are a financial specialist. Your role is to:
    1. Analyze financial data and markets
    2. Provide investment insights
    3. Assess risk and opportunities
    4. Create financial reports
    5. Explain complex financial concepts

    Always provide accurate, well-reasoned financial analysis.""",
)

# Basic usage - batch agent addition
deployer = AOP(
    server_name="MyAgentServer",
    verbose=True,
    port=5932,
    transport="streamable-http",
)

agents = [
    research_agent,
    analysis_agent,
    writing_agent,
    code_agent,
    financial_agent,
]

deployer.add_agents_batch(agents)


deployer.run()
@@ -0,0 +1,32 @@
from swarms import Agent, run_agents_concurrently
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Initialize the equity analyst agents
equity_analyst_1 = Agent(
    agent_name="Equity-Analyst-1",
    agent_description="Equity research analyst focused on fundamental analysis",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    max_loops=1,
    model_name="gpt-4.1",
    dynamic_temperature_enabled=True,
)

equity_analyst_2 = Agent(
    agent_name="Equity-Analyst-2",
    agent_description="Equity research analyst focused on technical analysis",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    max_loops=1,
    model_name="gpt-4.1",
    dynamic_temperature_enabled=True,
)


outputs = run_agents_concurrently(
    agents=[equity_analyst_1, equity_analyst_2],
    task="What are the best ETFs for energy? Provide a reasoned comparison between top options and summarize your findings in a markdown table.",
    return_agent_output_dict=True,
)

print(outputs)
@@ -0,0 +1,32 @@
from swarms import Agent, run_agents_concurrently_uvloop
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)


# Initialize the equity analyst agents
equity_analyst_1 = Agent(
    agent_name="Equity-Analyst-1",
    agent_description="Equity research analyst focused on fundamental analysis",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    max_loops=1,
    model_name="gpt-4.1",
    dynamic_temperature_enabled=True,
)

equity_analyst_2 = Agent(
    agent_name="Equity-Analyst-2",
    agent_description="Equity research analyst focused on technical analysis",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    max_loops=1,
    model_name="gpt-4.1",
    dynamic_temperature_enabled=True,
)


outputs = run_agents_concurrently_uvloop(
    agents=[equity_analyst_1, equity_analyst_2],
    task="What are the best new therapies for diabetes?",
)

print(outputs)