diff --git a/examples/multi_agent/concurrent_examples/concurrent_example.py b/concurrent_example.py
similarity index 95%
rename from examples/multi_agent/concurrent_examples/concurrent_example.py
rename to concurrent_example.py
index d10baed3..fe2af00d 100644
--- a/examples/multi_agent/concurrent_examples/concurrent_example.py
+++ b/concurrent_example.py
@@ -1,11 +1,5 @@
-import os
-
-
 from swarms import Agent, ConcurrentWorkflow
 
-# Fetch the OpenAI API key from the environment variable
-api_key = os.getenv("OPENAI_API_KEY")
-
 # Initialize agents for different roles
 delaware_ccorp_agent = Agent(
     agent_name="Delaware-CCorp-Hiring-Agent",
@@ -59,7 +53,6 @@ task = """
 # Run agents with tasks concurrently
 swarm = ConcurrentWorkflow(
     agents=agents,
-    return_str_on=False,
     output_type="list",
 )
 
diff --git a/examples/aop_examples/get_all_agents.py b/examples/aop_examples/client/get_all_agents.py
similarity index 100%
rename from examples/aop_examples/get_all_agents.py
rename to examples/aop_examples/client/get_all_agents.py
diff --git a/examples/guides/850_workshop/aop_raw_client_code.py b/examples/guides/850_workshop/aop_raw_client_code.py
new file mode 100644
index 00000000..3d8a0098
--- /dev/null
+++ b/examples/guides/850_workshop/aop_raw_client_code.py
@@ -0,0 +1,89 @@
+import asyncio
+import json
+
+from mcp import ClientSession
+from mcp.client.streamable_http import streamablehttp_client
+
+from swarms.structs.aop import AOPCluster
+from swarms.tools.mcp_client_tools import execute_tool_call_simple
+
+
+async def discover_agents_example():
+    """
+    Discover all agents using the AOPCluster and print the result.
+    """
+    aop_cluster = AOPCluster(
+        urls=["http://localhost:5932/mcp"],
+        transport="streamable-http",
+    )
+    tool = aop_cluster.find_tool_by_server_name("discover_agents")
+    if not tool:
+        print("discover_agents tool not found.")
+        return None
+
+    tool_call_request = {
+        "type": "function",
+        "function": {
+            "name": "discover_agents",
+            "arguments": "{}",
+        },
+    }
+
+    result = await execute_tool_call_simple(
+        response=tool_call_request,
+        server_path="http://localhost:5932/mcp",
+        output_type="dict",
+        verbose=False,
+    )
+    print(json.dumps(result, indent=2))
+    return result
+
+
+async def raw_mcp_discover_agents_example():
+    """
+    Call the MCP server directly using the raw MCP client to execute the
+    built-in "discover_agents" tool and print the JSON result.
+
+    This demonstrates how to:
+    - Initialize an MCP client over streamable HTTP
+    - List available tools (optional)
+    - Call a specific tool by name with arguments
+    """
+    url = "http://localhost:5932/mcp"
+
+    # Open a raw MCP client connection
+    async with streamablehttp_client(url, timeout=10) as ctx:
+        if len(ctx) == 2:
+            read, write = ctx
+        else:
+            read, write, *_ = ctx
+
+        async with ClientSession(read, write) as session:
+            # Initialize the MCP session and optionally inspect tools
+            await session.initialize()
+
+            # Optional: list tools (uncomment to print)
+            # tools = await session.list_tools()
+            # print(json.dumps(tools.model_dump(), indent=2))
+
+            # Call the built-in discovery tool with empty arguments
+            result = await session.call_tool(
+                name="discover_agents",
+                arguments={},
+            )
+
+            # Convert to dict for pretty printing
+            print(json.dumps(result.model_dump(), indent=2))
+            return result.model_dump()
+
+
+def main():
+    """
+    Run the helper-based and raw MCP client discovery examples.
+    """
+    # asyncio.run(discover_agents_example())
+    asyncio.run(raw_mcp_discover_agents_example())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/guides/850_workshop/aop_raw_task_example.py b/examples/guides/850_workshop/aop_raw_task_example.py
new file mode 100644
index 00000000..7880d33f
--- /dev/null
+++ b/examples/guides/850_workshop/aop_raw_task_example.py
@@ -0,0 +1,69 @@
+import asyncio
+import json
+
+from mcp import ClientSession
+from mcp.client.streamable_http import streamablehttp_client
+
+
+async def call_agent_tool_raw(
+    url: str,
+    tool_name: str,
+    task: str,
+    img: str | None = None,
+    imgs: list[str] | None = None,
+    correct_answer: str | None = None,
+) -> dict:
+    async with streamablehttp_client(url, timeout=30) as ctx:
+        if len(ctx) == 2:
+            read, write = ctx
+        else:
+            read, write, *_ = ctx
+
+        async with ClientSession(read, write) as session:
+            await session.initialize()
+            arguments = {"task": task}
+            if img is not None:
+                arguments["img"] = img
+            if imgs is not None:
+                arguments["imgs"] = imgs
+            if correct_answer is not None:
+                arguments["correct_answer"] = correct_answer
+            result = await session.call_tool(
+                name=tool_name, arguments=arguments
+            )
+            return result.model_dump()
+
+
+async def list_available_tools(url: str) -> dict:
+    async with streamablehttp_client(url, timeout=30) as ctx:
+        if len(ctx) == 2:
+            read, write = ctx
+        else:
+            read, write, *_ = ctx
+
+        async with ClientSession(read, write) as session:
+            await session.initialize()
+            tools = await session.list_tools()
+            return tools.model_dump()
+
+
+def main():
+    url = "http://localhost:5932/mcp"
+    tool_name = "Research-Agent"
+    task = "What are the latest experimental drug trials coming up in the next 6 months?"
+
+    tools_info = asyncio.run(list_available_tools(url))
+    print("Available tools:")
+    print(json.dumps(tools_info, indent=2))
+
+    ####### Step 2: Call the agent
+
+    print(f"\nCalling tool '{tool_name}' with task...\n")
+    result = asyncio.run(
+        call_agent_tool_raw(url=url, tool_name=tool_name, task=task)
+    )
+    print(json.dumps(result, indent=2))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/guides/850_workshop/moa_seq_example.py b/examples/guides/850_workshop/moa_seq_example.py
new file mode 100644
index 00000000..2ffc2b87
--- /dev/null
+++ b/examples/guides/850_workshop/moa_seq_example.py
@@ -0,0 +1,19 @@
+from swarms.structs.self_moa_seq import SelfMoASeq
+
+# Initialize
+moa_seq = SelfMoASeq(
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    temperature=0.7,
+    window_size=6,
+    verbose=True,
+    num_samples=4,
+    top_p=None,
+)
+
+task = (
+    "Describe an effective treatment plan for a patient with a broken rib. "
+    "Include immediate care, pain management, expected recovery timeline, and potential complications to watch for."
+)
+
+result = moa_seq.run(task)
+print(result)
diff --git a/examples/guides/850_workshop/peer_review_example.py b/examples/guides/850_workshop/peer_review_example.py
new file mode 100644
index 00000000..6220894b
--- /dev/null
+++ b/examples/guides/850_workshop/peer_review_example.py
@@ -0,0 +1,86 @@
+from typing import List, Dict, Any
+from swarms import Agent
+from swarms.structs.social_algorithms import SocialAlgorithms
+
+
+def peer_review_algorithm(
+    agents: List[Agent], task: str, **kwargs
+) -> Dict[str, Any]:
+    """
+    A peer review social algorithm where agents review each other's work.
+
+    Args:
+        agents: List of agents participating in the algorithm
+        task: The task to be processed
+        **kwargs: Additional keyword arguments
+
+    Returns:
+        Dict containing the results from each agent and their reviews
+    """
+    if len(agents) < 2:
+        raise ValueError("This algorithm requires at least 2 agents")
+
+    results = {}
+    reviews = {}
+
+    # Each agent works on the task independently
+    for i, agent in enumerate(agents):
+        agent_prompt = f"Work on the following task: {task}"
+        result = agent.run(agent_prompt)
+        results[f"agent_{i}_{agent.agent_name}"] = result
+
+    # Each agent reviews another agent's work (circular review)
+    for i, agent in enumerate(agents):
+        reviewer_index = (i + 1) % len(agents)
+        reviewed_agent = agents[reviewer_index]
+
+        review_prompt = f"Review the following work by {reviewed_agent.agent_name}:\n\n{results[f'agent_{reviewer_index}_{reviewed_agent.agent_name}']}\n\nProvide constructive feedback and suggestions for improvement."
+        review = agent.run(review_prompt)
+        reviews[
+            f"{agent.agent_name}_reviews_{reviewed_agent.agent_name}"
+        ] = review
+
+    return {
+        "original_work": results,
+        "peer_reviews": reviews,
+        "task": task,
+    }
+
+
+# Create agents
+researcher = Agent(
+    agent_name="Researcher",
+    system_prompt="You are a research specialist focused on gathering comprehensive information.",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+)
+
+analyst = Agent(
+    agent_name="Analyst",
+    system_prompt="You are an analytical specialist focused on interpreting and analyzing data.",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+)
+
+reviewer = Agent(
+    agent_name="Reviewer",
+    system_prompt="You are a quality reviewer focused on providing constructive feedback.",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+)
+
+# Create and run the social algorithm
+social_alg = SocialAlgorithms(
+    name="Peer-Review",
+    description="Peer review workflow where agents review each other's work",
+    agents=[researcher, analyst, reviewer],
+    social_algorithm=peer_review_algorithm,
+    verbose=False,
+)
+
+result = social_alg.run("What are the best gold ETFs?")
+
+print(result)
diff --git a/examples/guides/850_workshop/server.py b/examples/guides/850_workshop/server.py
new file mode 100644
index 00000000..3d51f3a8
--- /dev/null
+++ b/examples/guides/850_workshop/server.py
@@ -0,0 +1,108 @@
+from swarms import Agent, AOP
+
+# Create specialized agents
+research_agent = Agent(
+    agent_name="Research-Agent",
+    agent_description="Expert in research, data collection, and information gathering",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+    dynamic_temperature_enabled=True,
+    system_prompt="""You are a research specialist. Your role is to:
+    1. Gather comprehensive information on any given topic
+    2. Analyze data from multiple sources
+    3. Provide well-structured research findings
+    4. Cite sources and maintain accuracy
+    5. Present findings in a clear, organized manner
+
+    Always provide detailed, factual information with proper context.""",
+)
+
+analysis_agent = Agent(
+    agent_name="Analysis-Agent",
+    agent_description="Expert in data analysis, pattern recognition, and generating insights",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+    dynamic_temperature_enabled=True,
+    system_prompt="""You are an analysis specialist. Your role is to:
+    1. Analyze data and identify patterns
+    2. Generate actionable insights
+    3. Create visualizations and summaries
+    4. Provide statistical analysis
+    5. Make data-driven recommendations
+
+    Focus on extracting meaningful insights from information.""",
+)
+
+writing_agent = Agent(
+    agent_name="Writing-Agent",
+    agent_description="Expert in content creation, editing, and communication",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+    dynamic_temperature_enabled=True,
+    system_prompt="""You are a writing specialist. Your role is to:
+    1. Create engaging, well-structured content
+    2. Edit and improve existing text
+    3. Adapt tone and style for different audiences
+    4. Ensure clarity and coherence
+    5. Follow best practices in writing
+
+    Always produce high-quality, professional content.""",
+)
+
+code_agent = Agent(
+    agent_name="Code-Agent",
+    agent_description="Expert in programming, code review, and software development",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+    dynamic_temperature_enabled=True,
+    system_prompt="""You are a coding specialist. Your role is to:
+    1. Write clean, efficient code
+    2. Debug and fix issues
+    3. Review and optimize code
+    4. Explain programming concepts
+    5. Follow best practices and standards
+
+    Always provide working, well-documented code.""",
+)
+
+financial_agent = Agent(
+    agent_name="Financial-Agent",
+    agent_description="Expert in financial analysis, market research, and investment insights",
+    model_name="anthropic/claude-haiku-4-5-20251001",
+    max_loops=1,
+    top_p=None,
+    dynamic_temperature_enabled=True,
+    system_prompt="""You are a financial specialist. Your role is to:
+    1. Analyze financial data and markets
+    2. Provide investment insights
+    3. Assess risk and opportunities
+    4. Create financial reports
+    5. Explain complex financial concepts
+
+    Always provide accurate, well-reasoned financial analysis.""",
+)
+
+# Basic usage - individual agent addition
+deployer = AOP(
+    server_name="MyAgentServer",
+    verbose=True,
+    port=5932,
+    transport="streamable-http",
+)
+
+agents = [
+    research_agent,
+    analysis_agent,
+    writing_agent,
+    code_agent,
+    financial_agent,
+]
+
+deployer.add_agents_batch(agents)
+
+
+deployer.run()
diff --git a/examples/guides/850_workshop/test_agent_concurrent.py b/examples/guides/850_workshop/test_agent_concurrent.py
new file mode 100644
index 00000000..2b5ec017
--- /dev/null
+++ b/examples/guides/850_workshop/test_agent_concurrent.py
@@ -0,0 +1,32 @@
+from swarms import Agent, run_agents_concurrently
+from swarms.prompts.finance_agent_sys_prompt import (
+    FINANCIAL_AGENT_SYS_PROMPT,
+)
+
+# Initialize the equity analyst agents
+equity_analyst_1 = Agent(
+    agent_name="Equity-Analyst-1",
+    agent_description="Equity research analyst focused on fundamental analysis",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    max_loops=1,
+    model_name="gpt-4.1",
+    dynamic_temperature_enabled=True,
+)
+
+equity_analyst_2 = Agent(
+    agent_name="Equity-Analyst-2",
+    agent_description="Equity research analyst focused on technical analysis",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    max_loops=1,
+    model_name="gpt-4.1",
+    dynamic_temperature_enabled=True,
+)
+
+
+outputs = run_agents_concurrently(
+    agents=[equity_analyst_1, equity_analyst_2],
+    task="What are the best ETFs for energy? Provide a reasoned comparison between top options and summarize your findings in a markdown table.",
+    return_agent_output_dict=True,
+)
+
+print(outputs)
diff --git a/examples/guides/850_workshop/uvloop_example.py b/examples/guides/850_workshop/uvloop_example.py
new file mode 100644
index 00000000..bf15da88
--- /dev/null
+++ b/examples/guides/850_workshop/uvloop_example.py
@@ -0,0 +1,32 @@
+from swarms import Agent, run_agents_concurrently_uvloop
+from swarms.prompts.finance_agent_sys_prompt import (
+    FINANCIAL_AGENT_SYS_PROMPT,
+)
+
+
+# Initialize the equity analyst agents
+equity_analyst_1 = Agent(
+    agent_name="Equity-Analyst-1",
+    agent_description="Equity research analyst focused on fundamental analysis",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    max_loops=1,
+    model_name="gpt-4.1",
+    dynamic_temperature_enabled=True,
+)
+
+equity_analyst_2 = Agent(
+    agent_name="Equity-Analyst-2",
+    agent_description="Equity research analyst focused on technical analysis",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    max_loops=1,
+    model_name="gpt-4.1",
+    dynamic_temperature_enabled=True,
+)
+
+
+outputs = run_agents_concurrently_uvloop(
+    agents=[equity_analyst_1, equity_analyst_2],
+    task="What are the best new therapies for diabetes?",
+)
+
+print(outputs)
diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py
index 286dccb6..a5f3a04d 100644
--- a/swarms/structs/concurrent_workflow.py
+++ b/swarms/structs/concurrent_workflow.py
@@ -100,7 +100,9 @@ class ConcurrentWorkflow:
         self.agent_statuses = {}
         self.reliability_check()
 
-        self.conversation = Conversation(name=f"concurrent_workflow_name_{name}_id_{self.id}_conversation")
+        self.conversation = Conversation(
+            name=f"concurrent_workflow_name_{name}_id_{self.id}_conversation"
+        )
 
         if self.show_dashboard is True:
             self.agents = self.fix_agents()
diff --git a/swarms/structs/self_moa_seq.py b/swarms/structs/self_moa_seq.py
index 1fa9d861..0bf3ad2c 100644
--- a/swarms/structs/self_moa_seq.py
+++ b/swarms/structs/self_moa_seq.py
@@ -76,6 +76,8 @@ class SelfMoASeq:
         retry_delay: float = 1.0,
         retry_backoff_multiplier: float = 2.0,
         retry_max_delay: float = 60.0,
+        additional_kwargs: Dict[str, Any] = {},
+        top_p: Optional[float] = None,
     ):
         # Validate parameters
         if window_size < 2:
@@ -137,6 +139,7 @@ class SelfMoASeq:
             temperature=self.temperature,
             max_loops=1,
             verbose=self.verbose,
+            top_p=top_p,
         )
 
         # Initialize aggregator agent (synthesizes outputs)
@@ -152,6 +155,7 @@ class SelfMoASeq:
             temperature=0.0,  # Deterministic aggregation
             max_loops=1,
             verbose=self.verbose,
+            top_p=top_p,
         )
 
         # Metrics tracking
diff --git a/swarms/structs/social_algorithms.py b/swarms/structs/social_algorithms.py
index c830fb21..aeee7a69 100644
--- a/swarms/structs/social_algorithms.py
+++ b/swarms/structs/social_algorithms.py
@@ -143,7 +143,7 @@ class SocialAlgorithms:
         max_execution_time: float = 300.0,  # 5 minutes default
         output_type: OutputType = "dict",
         verbose: bool = False,
-        enable_communication_logging: bool = True,
+        enable_communication_logging: bool = False,
         parallel_execution: bool = False,
         max_workers: int = None,
         *args,
@@ -613,24 +613,28 @@ class SocialAlgorithms:
         original_run = Agent.run
 
         def logged_talk_to(
-            self, agent, task, img=None, *args, **kwargs
+            agent_self, agent, task, img=None, *args, **kwargs
         ):
             # Log the communication
             self._log_communication(
-                self.agent_name, agent.agent_name, task
+                agent_self.agent_name, agent.agent_name, task
             )
             # Call original method
             return original_talk_to(
-                self, agent, task, img, *args, **kwargs
+                agent_self, agent, task, img, *args, **kwargs
             )
 
-        def logged_run(self, task, img=None, *args, **kwargs):
+        def logged_run(
+            agent_self, task, img=None, *args, **kwargs
+        ):
             # Log the communication (self-communication)
             self._log_communication(
-                self.agent_name, self.agent_name, task
+                agent_self.agent_name, agent_self.agent_name, task
             )
             # Call original method
-            return original_run(self, task, img, *args, **kwargs)
+            return original_run(
+                agent_self, task, img, *args, **kwargs
+            )
 
         # Temporarily replace methods
         Agent.talk_to = logged_talk_to
diff --git a/tests/structs/multiagentrouter_models_test.py b/tests/structs/multiagentrouter_models_test.py
index 3bdc89fe..7ec28bb0 100644
--- a/tests/structs/multiagentrouter_models_test.py
+++ b/tests/structs/multiagentrouter_models_test.py
@@ -27,11 +27,11 @@ models_to_test = [
     "gpt-4.1",
     "gpt-4o",
     "gpt-5-mini",
-    "o4-mini",
-    "o3",
-    "claude-opus-4-20250514",
+    "o4-mini",
+    "o3",
+    "claude-opus-4-20250514",
     "claude-sonnet-4-20250514",
-    "claude-3-7-sonnet-20250219",
+    "claude-3-7-sonnet-20250219",
     "gemini/gemini-2.5-flash",
     "gemini/gemini-2.5-pro",
 ]
@@ -43,17 +43,22 @@ model_logs = []
 for model_name in models_to_test:
     print(f"\n--- Testing model: {model_name} ---")
     router_execute = MultiAgentRouter(
-        agents=agents, temperature=0.5, model=model_name,
+        agents=agents,
+        temperature=0.5,
+        model=model_name,
     )
     try:
         result = router_execute.run(task)
         print(f"Run completed successfully for {model_name}")
-        model_logs.append({"model": model_name, "status": "✅ Success"})
+        model_logs.append(
+            {"model": model_name, "status": "✅ Success"}
+        )
     except Exception as e:
         print(f"An error occurred for {model_name}")
-        model_logs.append({"model": model_name, "status": f"❌ Error: {e}"})
+        model_logs.append(
+            {"model": model_name, "status": f"❌ Error: {e}"}
+        )
 
 print("\n===== Model Run Summary =====")
 for log in model_logs:
     print(f"{log['model']}: {log['status']}")
-