[FIX][MCP logic file path name] [Improvement][Majority voting] [Improvement][Agent markdown formatter]

pull/1064/head
Kye Gomez 2 months ago
parent cd75b19127
commit 7b6b7c4980

@ -52298,7 +52298,7 @@ Asynchronously fetches available MCP tools from the server with retry logic.
```python
import asyncio
from swarms.tools.mcp_client_call import aget_mcp_tools
from swarms.tools.mcp_client_tools import aget_mcp_tools
from swarms.tools.mcp_connection import MCPConnection
async def main():
@ -52348,7 +52348,7 @@ Synchronous version of get_mcp_tools that handles event loop management.
#### Example
```python
from swarms.tools.mcp_client_call import get_mcp_tools_sync
from swarms.tools.mcp_client_tools import get_mcp_tools_sync
from swarms.tools.mcp_connection import MCPConnection
# Using server path
@ -52390,7 +52390,7 @@ Get tools for multiple MCP servers concurrently using ThreadPoolExecutor.
#### Example
```python
from swarms.tools.mcp_client_call import get_tools_for_multiple_mcp_servers
from swarms.tools.mcp_client_tools import get_tools_for_multiple_mcp_servers
from swarms.tools.mcp_connection import MCPConnection
# Define server URLs
@ -52445,7 +52445,7 @@ Execute a tool call using the MCP client.
#### Example
```python
import asyncio
from swarms.tools.mcp_client_call import execute_tool_call_simple
from swarms.tools.mcp_client_tools import execute_tool_call_simple
from swarms.tools.mcp_connection import MCPConnection
async def main():

@ -42,7 +42,7 @@ Asynchronously fetches available MCP tools from the server with retry logic.
```python
import asyncio
from swarms.tools.mcp_client_call import aget_mcp_tools
from swarms.tools.mcp_client_tools import aget_mcp_tools
from swarms.tools.mcp_connection import MCPConnection
async def main():
@ -92,7 +92,7 @@ Synchronous version of get_mcp_tools that handles event loop management.
#### Example
```python
from swarms.tools.mcp_client_call import get_mcp_tools_sync
from swarms.tools.mcp_client_tools import get_mcp_tools_sync
from swarms.tools.mcp_connection import MCPConnection
# Using server path
@ -134,7 +134,7 @@ Get tools for multiple MCP servers concurrently using ThreadPoolExecutor.
#### Example
```python
from swarms.tools.mcp_client_call import get_tools_for_multiple_mcp_servers
from swarms.tools.mcp_client_tools import get_tools_for_multiple_mcp_servers
from swarms.tools.mcp_connection import MCPConnection
# Define server URLs
@ -189,7 +189,7 @@ Execute a tool call using the MCP client.
#### Example
```python
import asyncio
from swarms.tools.mcp_client_call import execute_tool_call_simple
from swarms.tools.mcp_client_tools import execute_tool_call_simple
from swarms.tools.mcp_connection import MCPConnection
async def main():
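
The hunks above all swap `swarms.tools.mcp_client_call` for `swarms.tools.mcp_client_tools` in the documented examples. A minimal sketch of the synchronous call path under the new module name; the server URL and the `connection=` keyword are assumptions for illustration, not taken from this commit:

```python
# Sketch: fetch MCP tools through the renamed module.
# The URL and keyword arguments below are illustrative assumptions.
from swarms.schemas.mcp_schemas import MCPConnection
from swarms.tools.mcp_client_tools import get_mcp_tools_sync

connection = MCPConnection(url="http://localhost:8000/mcp")  # hypothetical local server
tools = get_mcp_tools_sync(connection=connection)            # assumed keyword name
print(f"Fetched {len(tools)} tools")
```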

@ -1,19 +1,5 @@
from swarms import Agent
# SYSTEM_PROMPT = (
# "You are an expert system for generating immersive, location-based augmented reality (AR) experiences. "
# "Given an input image, your task is to thoroughly analyze the scene and identify every point of interest (POI), "
# "including landmarks, objects, architectural features, signage, and any elements relevant to the location or context. "
# "For each POI you detect, provide a clear annotation that includes:\n"
# "- A concise label or title for the POI\n"
# "- A detailed description explaining its significance, historical or cultural context, or practical information\n"
# "- Any relevant facts, trivia, or actionable insights that would enhance a user's AR experience\n"
# "Present your output as a structured list, with each POI clearly separated. "
# "Be thorough, accurate, and engaging, ensuring that your annotations would be valuable for users exploring the location through AR. "
# "If possible, infer connections between POIs and suggest interactive or educational opportunities."
# "Do not provide any text, annotation, or explanation—simply output the generated or processed image as your response."
# )
SYSTEM_PROMPT = (
"You are a location-based AR experience generator. Highlight points of interest in this image and annotate relevant information about it. "
@ -35,6 +21,3 @@ out = agent.run(
task=f"{SYSTEM_PROMPT} \n\n Annotate all the tallest buildings in the image",
img="hk.jpg",
)
print("AR Annotation Output:")
print(out)

@ -1,4 +1,4 @@
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
execute_tool_call_simple,
get_mcp_tools_sync,
)

@ -1,4 +1,4 @@
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
get_mcp_tools_sync,
execute_tool_call_simple,
)

@ -8,7 +8,7 @@ This example shows how to:
"""
import asyncio
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
execute_multiple_tools_on_multiple_mcp_servers,
execute_multiple_tools_on_multiple_mcp_servers_sync,
get_tools_for_multiple_mcp_servers,

@ -2,7 +2,7 @@
Simple test for the execute_multiple_tools_on_multiple_mcp_servers functionality.
"""
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
execute_multiple_tools_on_multiple_mcp_servers_sync,
)

@ -1,4 +1,4 @@
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
get_mcp_tools_sync,
)
from swarms.schemas.mcp_schemas import MCPConnection

@ -1,5 +1,5 @@
from swarms.schemas.mcp_schemas import MCPConnection
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
execute_tool_call_simple,
)
import asyncio

@ -1,7 +1,7 @@
import json
from swarms.schemas.mcp_schemas import MCPConnection
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
get_mcp_tools_sync,
)

@ -1,4 +1,4 @@
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
get_tools_for_multiple_mcp_servers,
)
from swarms.schemas.mcp_schemas import MCPConnection

@ -6,18 +6,12 @@ agent = Agent(
reasoning_effort="low", # Enable reasoning with high effort
temperature=1,
max_tokens=2000,
stream=False,
thinking_tokens=1024,
top_p=0.95,
streaming_on=True,
print_on=False,
)
out = agent.run(
task="Solve this step-by-step: A farmer has 17 sheep and all but 9 die. How many sheep does he have left?",
)
for chunk in out:
# Flush
print(chunk, end="", flush=True)

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "8.1.4"
version = "8.2.2"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@ -0,0 +1,165 @@
from swarms import Agent
from swarms.structs.majority_voting import MajorityVoting
# System prompts for different coding expertise areas
PYTHON_EXPERT_PROMPT = """You are a senior Python developer with expertise in clean, efficient code.
Focus on:
- Clean, readable code structure
- Pythonic idioms and best practices
- Efficient algorithms and data structures
- Proper error handling and edge cases
- Documentation and code comments"""
GAME_DEV_EXPERT_PROMPT = """You are a game development specialist with focus on game mechanics.
Focus on:
- Smooth game loop implementation
- Intuitive game controls and mechanics
- Game state management
- Performance optimization for games
- User experience and gameplay flow"""
SOFTWARE_ARCHITECT_PROMPT = """You are a software architect who designs robust, maintainable systems.
Focus on:
- Modular, scalable architecture
- Separation of concerns
- Design patterns and principles
- Code maintainability and extensibility
- Best practices for larger codebases"""
CONSENSUS_EVALUATOR_PROMPT = """You are a senior code reviewer and technical lead who evaluates code quality comprehensively.
Your evaluation criteria include:
1. **Code Quality (25%)**:
- Readability and clarity
- Code structure and organization
- Naming conventions
- Documentation quality
2. **Functionality (25%)**:
- Correct game mechanics implementation
- Complete feature set
- Error handling and edge cases
- Game stability and robustness
3. **Performance & Efficiency (20%)**:
- Algorithm efficiency
- Memory usage optimization
- Smooth gameplay performance
- Resource management
4. **Best Practices (15%)**:
- Python conventions and idioms
- Design patterns usage
- Code reusability
- Maintainability
5. **Innovation & Creativity (15%)**:
- Creative solutions
- Unique features
- Code elegance
- Problem-solving approach
Provide detailed analysis with scores for each criterion and a final recommendation.
Compare implementations across multiple loops if applicable."""
def create_code_agents():
"""Create multiple coding agents with different expertise"""
agents = []
# Python Expert Agent
python_expert = Agent(
agent_name="Python-Code-Expert",
agent_description="Senior Python developer specializing in clean, efficient code",
system_prompt=PYTHON_EXPERT_PROMPT,
model_name="gpt-4o",
max_loops=1,
max_tokens=4000,
temperature=0.7,
)
agents.append(python_expert)
# Game Development Expert Agent
game_expert = Agent(
agent_name="Game-Dev-Specialist",
agent_description="Game development expert focusing on mechanics and user experience",
system_prompt=GAME_DEV_EXPERT_PROMPT,
model_name="gpt-4o",
max_loops=1,
max_tokens=4000,
temperature=0.7,
)
agents.append(game_expert)
return agents
def create_consensus_agent():
"""Create the consensus agent for code quality evaluation"""
return Agent(
agent_name="Code-Quality-Evaluator",
agent_description="Senior code reviewer evaluating implementations across multiple criteria",
system_prompt=CONSENSUS_EVALUATOR_PROMPT,
model_name="gpt-4o",
max_loops=1,
max_tokens=5000,
temperature=0.3, # Lower temperature for more consistent evaluation
)
def main():
"""Main function to run the Snake game code quality voting example"""
# Create agents
coding_agents = create_code_agents()
consensus_agent = create_consensus_agent()
# Create Majority Voting system with multi-loop capability
snake_game_voting = MajorityVoting(
name="Snake-Game-Code-Quality-Voting",
description="Multi-agent system for creating and evaluating Snake game implementations",
agents=coding_agents,
consensus_agent=consensus_agent,
max_loops=1, # Enable multi-loop refinement
verbose=True,
)
# Define the coding task
coding_task = """
Create a complete Snake game implementation in Python using the following requirements:
**Core Requirements:**
1. **Game Board**: 20x20 grid with borders
2. **Snake**: Starts at center, grows when eating food
3. **Food**: Randomly placed, appears after being eaten
4. **Controls**: Arrow keys or WASD for movement
5. **Game Over**: When snake hits wall or itself
6. **Score**: Display current score and high score
**Technical Requirements:**
- Use Python standard library only (no external dependencies)
- Clean, well-documented code
- Proper error handling
- Efficient algorithms
- Modular design where possible
**Advanced Features (Bonus):**
- Increasing speed as score grows
- Pause functionality
- Game restart option
- Score persistence
- Smooth animations
**Output Format:**
Provide the complete, runnable Python code with proper structure and documentation.
Make sure the code is immediately executable and includes a main function.
"""
# Run the multi-loop voting system
result = snake_game_voting.run(coding_task)
return result
if __name__ == "__main__":
main()

@ -171,7 +171,6 @@ class AgentJudge:
# Evaluate a single output
output = "The capital of France is Paris."
evaluation = judge.step(task=output)
print(evaluation)
# Evaluate multiple outputs with context building
outputs = [

@ -5,6 +5,7 @@ import time
import webbrowser
from pathlib import Path
from dotenv import load_dotenv
from rich.align import Align
from rich.console import Console
from rich.panel import Panel
@ -18,11 +19,10 @@ from swarms.agents.auto_generate_swarm_config import (
from swarms.agents.create_agents_from_yaml import (
create_agents_from_yaml,
)
from swarms.cli.onboarding_process import OnboardingProcess
from swarms.structs.agent import Agent
from swarms.structs.agent_loader import AgentLoader
from swarms.utils.formatter import formatter
from dotenv import load_dotenv
load_dotenv()
@ -73,11 +73,17 @@ def show_ascii_art():
panel = Panel(
Text(ASCII_ART, style=f"bold {COLORS['primary']}"),
border_style=COLORS["secondary"],
title="[bold]Welcome to the Swarms CLI[/bold]",
subtitle="[dim]swarms.ai[/dim]",
title="[bold]Swarms CLI[/bold]",
)
console.print(panel)
formatter.print_panel(
"Access the full Swarms CLI documentation and API guide at https://docs.swarms.world/en/latest/swarms/cli/cli_reference/. For help with a specific command, use swarms <command> --help to unlock the full power of Swarms CLI.",
title="Documentation and Assistance",
style="red",
)
def check_workspace_dir() -> tuple[bool, str, str]:
"""Check if WORKSPACE_DIR environment variable is set."""
@ -596,7 +602,10 @@ def create_command_table() -> Table:
table.add_column("Description", style="dim white")
commands = [
("onboarding", "Start the interactive onboarding process"),
(
"onboarding",
"Run environment setup check (same as setup-check)",
),
("help", "Display this help message"),
("get-api-key", "Retrieve your API key from the platform"),
("check-login", "Verify login status and initialize cache"),
@ -624,15 +633,171 @@ def create_command_table() -> Table:
return table
def create_detailed_command_table() -> Table:
"""Create a comprehensive table of all available commands with detailed information."""
table = Table(
show_header=True,
header_style=f"bold {COLORS['primary']}",
border_style=COLORS["secondary"],
title="🚀 Swarms CLI - Complete Command Reference",
title_style=f"bold {COLORS['primary']}",
padding=(0, 1),
show_lines=True,
expand=True,
)
# Add columns with consistent widths and better styles
table.add_column(
"Command",
style=f"bold {COLORS['accent']}",
width=16,
no_wrap=True,
)
table.add_column(
"Category", style="bold cyan", width=12, justify="center"
)
table.add_column(
"Description", style="white", width=45, no_wrap=False
)
table.add_column(
"Usage Example", style="dim yellow", width=50, no_wrap=False
)
table.add_column(
"Key Args", style="dim magenta", width=20, no_wrap=False
)
commands = [
{
"cmd": "onboarding",
"category": "Setup",
"desc": "Run environment setup check (same as setup-check)",
"usage": "swarms onboarding [--verbose]",
"args": "--verbose",
},
{
"cmd": "help",
"category": "Info",
"desc": "Display this comprehensive help message",
"usage": "swarms help",
"args": "None",
},
{
"cmd": "get-api-key",
"category": "Setup",
"desc": "Open browser to retrieve API keys from platform",
"usage": "swarms get-api-key",
"args": "None",
},
{
"cmd": "check-login",
"category": "Auth",
"desc": "Verify authentication status and cache",
"usage": "swarms check-login",
"args": "None",
},
{
"cmd": "run-agents",
"category": "Execution",
"desc": "Execute agents from YAML configuration",
"usage": "swarms run-agents --yaml-file agents.yaml",
"args": "--yaml-file",
},
{
"cmd": "load-markdown",
"category": "Loading",
"desc": "Load agents from markdown files",
"usage": "swarms load-markdown --markdown-path ./agents/",
"args": "--markdown-path",
},
{
"cmd": "agent",
"category": "Creation",
"desc": "Create and run a custom agent",
"usage": "swarms agent --name 'Agent' --task 'Analyze data'",
"args": "--name, --task",
},
{
"cmd": "auto-upgrade",
"category": "Maintenance",
"desc": "Update Swarms to latest version",
"usage": "swarms auto-upgrade",
"args": "None",
},
{
"cmd": "book-call",
"category": "Support",
"desc": "Schedule a strategy session",
"usage": "swarms book-call",
"args": "None",
},
{
"cmd": "autoswarm",
"category": "AI Gen",
"desc": "Generate autonomous swarm config",
"usage": "swarms autoswarm --task 'analyze data' --model gpt-4",
"args": "--task, --model",
},
{
"cmd": "setup-check",
"category": "Diagnostics",
"desc": "Run environment setup checks",
"usage": "swarms setup-check [--verbose]",
"args": "--verbose",
},
]
for cmd_info in commands:
table.add_row(
cmd_info["cmd"],
cmd_info["category"],
cmd_info["desc"],
cmd_info["usage"],
cmd_info["args"],
)
return table
def show_help():
"""Display a beautifully formatted help message."""
"""Display a beautifully formatted help message with comprehensive command reference."""
console.print(
"\n[bold]Swarms CLI - Command Reference[/bold]\n",
style=COLORS["primary"],
)
console.print(create_command_table())
# Add a quick usage panel with consistent sizing
usage_panel = Panel(
"[bold cyan]Quick Start Commands:[/bold cyan]\n"
"• [yellow]swarms onboarding[/yellow] - Environment setup check\n"
"• [yellow]swarms setup-check[/yellow] - Check your environment\n"
"• [yellow]swarms agent --name 'MyAgent' --task 'Hello World'[/yellow] - Create agent\n"
"• [yellow]swarms autoswarm --task 'analyze data' --model gpt-4[/yellow] - Auto-generate swarm",
title="⚡ Quick Usage Guide",
border_style=COLORS["secondary"],
padding=(1, 2),
expand=False,
width=140,
)
console.print(usage_panel)
console.print("\n")
console.print(create_detailed_command_table())
# Add additional help panels with consistent sizing
docs_panel = Panel(
"📚 [bold]Documentation:[/bold] https://docs.swarms.world\n"
"🐛 [bold]Support:[/bold] https://github.com/kyegomez/swarms/issues\n"
"💬 [bold]Community:[/bold] https://discord.gg/EamjgSaEQf",
title="🔗 Useful Links",
border_style=COLORS["success"],
padding=(1, 2),
expand=False,
width=140,
)
console.print(docs_panel)
console.print(
"\n[dim]For detailed documentation, visit: https://docs.swarms.world[/dim]"
"\n[dim]💡 Tip: Use [bold]swarms setup-check --verbose[/bold] for detailed environment diagnostics[/dim]"
)
@ -1125,7 +1290,11 @@ def main():
try:
if args.command == "onboarding":
OnboardingProcess().run()
# For compatibility, redirect onboarding to setup-check
console.print(
"[yellow]Note: 'swarms onboarding' now runs the same checks as 'swarms setup-check'[/yellow]"
)
run_setup_check(verbose=args.verbose)
elif args.command == "help":
show_help()
elif args.command == "get-api-key":

@ -1,189 +0,0 @@
import json
import os
import time
from typing import Dict
from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.main import (
log_agent_data,
)
logger = initialize_logger(log_folder="onboarding_process")
class OnboardingProcess:
"""
This class handles the onboarding process for users. It collects user data including their
full name, first name, email, Swarms API key, and system data, then autosaves it in both a
main JSON file and a cache file for reliability. It supports loading previously saved or cached data.
"""
def __init__(
self,
auto_save_path: str = "user_data.json",
cache_save_path: str = "user_data_cache.json",
) -> None:
"""
Initializes the OnboardingProcess with an autosave file path and a cache path.
Args:
auto_save_path (str): The path where user data is automatically saved.
cache_save_path (str): The path where user data is cached for reliability.
"""
self.user_data: Dict[str, str] = {}
self.auto_save_path = auto_save_path
self.cache_save_path = cache_save_path
self.load_existing_data()
def load_existing_data(self) -> None:
"""
Loads existing user data from the auto-save file or cache if available.
"""
if os.path.exists(self.auto_save_path):
try:
with open(self.auto_save_path, "r") as f:
self.user_data = json.load(f)
logger.info(
"Existing user data loaded from {}",
self.auto_save_path,
)
return
except json.JSONDecodeError as e:
logger.error(
"Failed to load user data from main file: {}", e
)
# Fallback to cache if main file fails
if os.path.exists(self.cache_save_path):
try:
with open(self.cache_save_path, "r") as f:
self.user_data = json.load(f)
logger.info(
"User data loaded from cache: {}",
self.cache_save_path,
)
except json.JSONDecodeError as e:
logger.error(
"Failed to load user data from cache: {}", e
)
def save_data(self, retry_attempts: int = 3) -> None:
"""
Saves the current user data to both the auto-save file and the cache file. If the main
save fails, the cache is updated instead. Implements retry logic with exponential backoff
in case both save attempts fail.
Args:
retry_attempts (int): The number of retries if saving fails.
"""
attempt = 0
backoff_time = 1 # Starting backoff time (in seconds)
while attempt < retry_attempts:
try:
combined_data = {**self.user_data}
log_agent_data(combined_data)
return # Exit the function if saving was successful
except Exception as e:
logger.error(
"Error saving user data (Attempt {}): {}",
attempt + 1,
e,
)
# Retry after a short delay (exponential backoff)
time.sleep(backoff_time)
attempt += 1
backoff_time *= (
2 # Double the backoff time for each retry
)
logger.error(
"Failed to save user data after {} attempts.",
retry_attempts,
)
def ask_input(self, prompt: str, key: str) -> None:
"""
Asks the user for input, validates it, and saves it in the user_data dictionary.
Autosaves and caches after each valid input.
Args:
prompt (str): The prompt message to display to the user.
key (str): The key under which the input will be saved in user_data.
Raises:
ValueError: If the input is empty or only contains whitespace.
"""
try:
response = input(prompt)
if response.strip().lower() == "quit":
logger.info(
"User chose to quit the onboarding process."
)
exit(0)
if not response.strip():
raise ValueError(
f"{key.capitalize()} cannot be empty."
)
self.user_data[key] = response.strip()
self.save_data()
return response
except ValueError as e:
logger.warning(e)
self.ask_input(prompt, key)
except KeyboardInterrupt:
logger.warning(
"Onboarding process interrupted by the user."
)
exit(1)
def collect_user_info(self) -> None:
"""
Initiates the onboarding process by collecting the user's full name, first name, email,
Swarms API key, and system data. Additionally, it reminds the user to set their WORKSPACE_DIR environment variable.
"""
logger.info("Initiating swarms cloud onboarding process...")
self.ask_input(
"Enter your first name (or type 'quit' to exit): ",
"first_name",
)
self.ask_input(
"Enter your Last Name (or type 'quit' to exit): ",
"last_name",
)
self.ask_input(
"Enter your email (or type 'quit' to exit): ", "email"
)
workspace = self.ask_input(
"Enter your WORKSPACE_DIR: This is where logs, errors, and agent configurations will be stored (or type 'quit' to exit). Remember to set this as an environment variable: https://docs.swarms.world/en/latest/swarms/install/quickstart/ || ",
"workspace_dir",
)
os.environ["WORKSPACE_DIR"] = workspace
logger.info(
"Important: Please ensure you have set your WORKSPACE_DIR environment variable as per the instructions provided."
)
logger.info(
"Additionally, remember to add your API keys for your respective models in your .env file."
)
logger.success("Onboarding process completed successfully!")
def run(self) -> None:
"""
Main method to run the onboarding process. It handles unexpected errors and ensures
proper finalization.
"""
try:
self.collect_user_info()
except Exception as e:
logger.error("An unexpected error occurred: {}", e)
finally:
logger.info("Finalizing the onboarding process.")
# if __name__ == "__main__":
# onboarding = OnboardingProcess()
# onboarding.run()

@ -63,7 +63,7 @@ from swarms.structs.safe_loading import (
)
from swarms.telemetry.main import log_agent_data
from swarms.tools.base_tool import BaseTool
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
execute_multiple_tools_on_multiple_mcp_servers_sync,
execute_tool_call_simple,
get_mcp_tools_sync,
@ -435,7 +435,7 @@ class Agent:
reasoning_effort: str = None,
drop_params: bool = True,
thinking_tokens: int = None,
reasoning_enabled: bool = True,
reasoning_enabled: bool = False,
*args,
**kwargs,
):
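
With `reasoning_enabled` now defaulting to `False`, callers that depended on the old default must opt in explicitly. A hedged sketch using only parameters that appear elsewhere in this diff; the model name and task are placeholders:

```python
# Sketch: enable reasoning explicitly now that the default is False.
# Model name and task are placeholders, not part of the commit.
from swarms import Agent

agent = Agent(
    model_name="gpt-4o",
    reasoning_enabled=True,   # new default is False
    reasoning_effort="low",
    thinking_tokens=1024,
    max_loops=1,
)
print(agent.run(task="Summarize the reasoning settings in one sentence."))
```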
@ -1928,7 +1928,6 @@ class Agent:
feedback_counts[feedback] += 1
else:
feedback_counts[feedback] = 1
print(f"Feedback counts: {feedback_counts}")
def undo_last(self) -> Tuple[str, str]:
"""
@ -2177,27 +2176,18 @@ class Agent:
# Sentiment analysis
if self.sentiment_analyzer:
sentiment = self.sentiment_analyzer(response)
print(f"Sentiment: {sentiment}")
if sentiment > self.sentiment_threshold:
print(
f"Sentiment: {sentiment} is above"
" threshold:"
f" {self.sentiment_threshold}"
)
pass
elif sentiment < self.sentiment_threshold:
print(
f"Sentiment: {sentiment} is below"
" threshold:"
f" {self.sentiment_threshold}"
)
pass
self.short_memory.add(
role=self.agent_name,
content=sentiment,
)
except Exception as e:
print(f"Error occurred during sentiment analysis: {e}")
except Exception:
pass
def stream_response(
self, response: str, delay: float = 0.001
@ -2224,11 +2214,9 @@ class Agent:
try:
# Stream and print the response token by token
for token in response.split():
print(token, end=" ", flush=True)
time.sleep(delay)
print() # Ensure a newline after streaming
except Exception as e:
print(f"An error occurred during streaming: {e}")
except Exception:
pass
def check_available_tokens(self):
# Log the amount of tokens left in the memory and in the task
@ -2842,7 +2830,6 @@ class Agent:
logger.info("Evaluating response...")
evaluated_response = self.evaluator(response)
print("Evaluated Response:" f" {evaluated_response}")
self.short_memory.add(
role="Evaluator",
content=evaluated_response,

@ -496,11 +496,8 @@ class BaseSwarm(ABC):
for future in as_completed(future_to_llm):
try:
responses.append(future.result())
except Exception as error:
print(
f"{future_to_llm[future]} generated an"
f" exception: {error}"
)
except Exception:
pass
self.last_responses = responses
self.task_history.append(task)
return responses

@ -8,25 +8,55 @@ from swarms.structs.conversation import Conversation
from swarms.structs.multi_agent_exec import run_agents_concurrently
from swarms.structs.swarm_id import swarm_id
from swarms.utils.formatter import formatter
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.output_types import OutputType
logger = initialize_logger(log_folder="majority_voting")
CONSENSUS_AGENT_PROMPT = """
Review the responses from all agents above. For each agent (referenced by their name),
provide a thorough, objective evaluation of their contribution to the task.
Compare and contrast the responses, highlighting strengths, weaknesses, and unique perspectives.
Determine which response(s) best address the task overall, and explain your reasoning clearly.
If possible, provide a ranked list or clear recommendation for the best response(s) based on the quality,
relevance, and completeness of the answers.
Be fair, detailed, and unbiased in your analysis, regardless of the topic.
"""
class MajorityVoting:
"""
A multi-loop majority voting system for agents that enables iterative consensus building.
This system allows agents to run multiple loops where each subsequent loop considers
the previous consensus, enabling agents to refine their responses and build towards
a more robust final consensus. The system maintains conversation history across
all loops and provides methods to analyze the evolution of consensus over time.
Key Features:
- Multi-loop consensus building with configurable loop count
- Agent memory retention across loops
- Comprehensive consensus history tracking
- Flexible output formats (string, dict, list)
- Loop-by-loop analysis capabilities
"""
def __init__(
self,
id: str = swarm_id(),
name: str = "MajorityVoting",
description: str = "A majority voting system for agents",
description: str = "A multi-loop majority voting system for agents",
agents: List[Agent] = None,
consensus_agent: Optional[Agent] = None,
autosave: bool = False,
verbose: bool = False,
max_loops: int = 1,
output_type: OutputType = "dict",
consensus_agent_prompt: str = CONSENSUS_AGENT_PROMPT,
*args,
**kwargs,
):
@ -39,6 +69,7 @@ class MajorityVoting:
self.verbose = verbose
self.max_loops = max_loops
self.output_type = output_type
self.consensus_agent_prompt = consensus_agent_prompt
self.conversation = Conversation(
time_enabled=False, *args, **kwargs
@ -63,7 +94,7 @@ class MajorityVoting:
def run(self, task: str, *args, **kwargs) -> List[Any]:
"""
Runs the majority voting system and returns the majority vote.
Runs the majority voting system with multi-loop functionality and returns the majority vote.
Args:
task (str): The task to be performed by the agents.
@ -74,62 +105,41 @@ class MajorityVoting:
List[Any]: The majority vote.
"""
results = run_agents_concurrently(
self.agents, task, max_workers=os.cpu_count()
)
# Add responses to conversation and log them
for agent, response in zip(self.agents, results):
self.conversation.add(
role="user",
content=task,
)
response = (
response if isinstance(response, list) else [response]
for i in range(self.max_loops):
output = run_agents_concurrently(
agents=self.agents,
task=self.conversation.get_str(),
max_workers=os.cpu_count(),
)
self.conversation.add(agent.agent_name, response)
responses = self.conversation.return_history_as_string()
# print(responses)
prompt = f"""Conduct a detailed majority voting analysis on the following conversation:
{responses}
Between the following agents: {[agent.agent_name for agent in self.agents]}
Please:
1. Identify the most common answer/recommendation across all agents
2. Analyze any major disparities or contrasting viewpoints between agents
3. Highlight key areas of consensus and disagreement
4. Evaluate the strength of the majority opinion
5. Note any unique insights from minority viewpoints
6. Provide a final synthesized recommendation based on the majority consensus
Focus on finding clear patterns while being mindful of important nuances in the responses.
"""
# If an output parser is provided, parse the responses
if self.consensus_agent is not None:
majority_vote = self.consensus_agent.run(prompt)
self.conversation.add(
self.consensus_agent.agent_name, majority_vote
for agent, output in zip(self.agents, output):
self.conversation.add(
role=agent.agent_name,
content=output,
)
# Now run the consensus agent
consensus_output = self.consensus_agent.run(
task=(
f"History: {self.conversation.get_str()} \n\n {self.consensus_agent_prompt}"
),
)
else:
# fetch the last agent
majority_vote = self.agents[-1].run(prompt)
self.conversation.add(
self.agents[-1].agent_name, majority_vote
role=self.consensus_agent.agent_name,
content=consensus_output,
)
# Return the majority vote
# return self.conversation.return_history_as_string()
if self.output_type == "str":
return self.conversation.get_str()
elif self.output_type == "dict":
return self.conversation.return_messages_as_dictionary()
elif self.output_type == "list":
return self.conversation.return_messages_as_list()
else:
return self.conversation.return_history_as_string()
return history_output_formatter(
conversation=self.conversation,
type=self.output_type,
)
def batch_run(
self, tasks: List[str], *args, **kwargs

@ -4,7 +4,7 @@ from swarms.tools.cohere_func_call_schema import (
ParameterDefinition,
)
from swarms.tools.json_utils import base_model_to_json
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
_create_server_tool_mapping,
_create_server_tool_mapping_async,
_execute_tool_call_simple,

@ -3,6 +3,7 @@ import contextlib
import json
import os
import random
import traceback
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
from typing import Any, Dict, List, Literal, Optional, Union
@ -215,7 +216,7 @@ def retry_with_backoff(retries=3, backoff_in_seconds=1):
except Exception as e:
if x == retries:
logger.error(
f"Failed after {retries} retries: {str(e)}"
f"Failed after {retries} retries: {str(e)}\n{traceback.format_exc()}"
)
raise
sleep_time = (
@ -418,7 +419,9 @@ async def aget_mcp_tools(
)
return tools
except Exception as e:
logger.error(f"Error fetching MCP tools: {str(e)}")
logger.error(
f"Error fetching MCP tools: {str(e)}\n{traceback.format_exc()}"
)
raise MCPConnectionError(
f"Failed to connect to MCP server: {str(e)}"
)
@ -464,7 +467,9 @@ def get_mcp_tools_sync(
)
)
except Exception as e:
logger.error(f"Error in get_mcp_tools_sync: {str(e)}")
logger.error(
f"Error in get_mcp_tools_sync: {str(e)}\n{traceback.format_exc()}"
)
raise MCPExecutionError(
f"Failed to execute MCP tools sync: {str(e)}"
)
@ -556,7 +561,7 @@ def get_tools_for_multiple_mcp_servers(
tools.extend(server_tools)
except Exception as e:
logger.error(
f"Error fetching tools from {url}: {str(e)}"
f"Error fetching tools from {url}: {str(e)}\n{traceback.format_exc()}"
)
raise MCPExecutionError(
f"Failed to fetch tools from {url}: {str(e)}"
@ -650,12 +655,16 @@ async def _execute_tool_call_simple(
)
return out
except Exception as e:
logger.error(f"Error in tool execution: {str(e)}")
logger.error(
f"Error in tool execution: {str(e)}\n{traceback.format_exc()}"
)
raise MCPExecutionError(
f"Tool execution failed for tool '{getattr(response, 'function', {}).get('name', 'unknown')}' on server '{url}': {str(e)}"
)
except Exception as e:
logger.error(f"Error in MCP client connection: {str(e)}")
logger.error(
f"Error in MCP client connection: {str(e)}\n{traceback.format_exc()}"
)
raise MCPConnectionError(
f"Failed to connect to MCP server '{url}' using transport '{transport}': {str(e)}"
)
@ -747,7 +756,7 @@ def _create_server_tool_mapping(
}
except Exception as e:
logger.warning(
f"Failed to fetch tools from server {url}: {str(e)}"
f"Failed to fetch tools from server {url}: {str(e)}\n{traceback.format_exc()}"
)
continue
return server_tool_mapping
@ -801,7 +810,7 @@ async def _create_server_tool_mapping_async(
}
except Exception as e:
logger.warning(
f"Failed to fetch tools from server {url}: {str(e)}"
f"Failed to fetch tools from server {url}: {str(e)}\n{traceback.format_exc()}"
)
continue
return server_tool_mapping
@ -842,7 +851,7 @@ async def _execute_tool_on_server(
}
except Exception as e:
logger.error(
f"Failed to execute tool on server {server_info['url']}: {str(e)}"
f"Failed to execute tool on server {server_info['url']}: {str(e)}\n{traceback.format_exc()}"
)
return {
"server_url": server_info["url"],
@ -1068,7 +1077,7 @@ async def execute_multiple_tools_on_multiple_mcp_servers(
for i, result in enumerate(results):
if isinstance(result, Exception):
logger.error(
f"Task {i} failed with exception: {str(result)}"
f"Task {i} failed with exception: {str(result)}\n{traceback.format_exc()}"
)
processed_results.append(
{
@ -1129,7 +1138,7 @@ def execute_multiple_tools_on_multiple_mcp_servers_sync(
)
except Exception as e:
logger.error(
f"Error in execute_multiple_tools_on_multiple_mcp_servers_sync: {str(e)}"
f"Error in execute_multiple_tools_on_multiple_mcp_servers_sync: {str(e)}\n{traceback.format_exc()}"
)
raise MCPExecutionError(
f"Failed to execute multiple tools sync: {str(e)}"

@ -24,9 +24,14 @@ spinner = Spinner("dots", style="yellow")
class MarkdownOutputHandler:
"""Custom output handler to render content as markdown with simplified syntax highlighting"""
"""Custom output handler to render content as markdown with simplified syntax highlighting."""
def __init__(self, console: "Console"):
"""Initialize the MarkdownOutputHandler with a console instance.
Args:
console (Console): Rich console instance for rendering.
"""
self.console = console
def _clean_output(self, output: str) -> str:
@ -57,29 +62,12 @@ class MarkdownOutputHandler:
)
# Remove spinner characters and progress indicators
output = re.sub(r"[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]", "", output)
output = re.sub(r"⠋ Processing\.\.\.", "", output)
output = re.sub(r"⠙ Processing\.\.\.", "", output)
output = re.sub(r"⠹ Processing\.\.\.", "", output)
output = re.sub(r"⠸ Processing\.\.\.", "", output)
output = re.sub(r"⠼ Processing\.\.\.", "", output)
output = re.sub(r"⠴ Processing\.\.\.", "", output)
output = re.sub(r"⠦ Processing\.\.\.", "", output)
output = re.sub(r"⠧ Processing\.\.\.", "", output)
output = re.sub(r"⠇ Processing\.\.\.", "", output)
output = re.sub(r"⠏ Processing\.\.\.", "", output)
# Remove loop indicators
output = re.sub(r"⠋ Loop \d+/\d+", "", output)
output = re.sub(r"⠙ Loop \d+/\d+", "", output)
output = re.sub(r"⠹ Loop \d+/\d+", "", output)
output = re.sub(r"⠸ Loop \d+/\d+", "", output)
output = re.sub(r"⠼ Loop \d+/\d+", "", output)
output = re.sub(r"⠴ Loop \d+/\d+", "", output)
output = re.sub(r"⠦ Loop \d+/\d+", "", output)
output = re.sub(r"⠧ Loop \d+/\d+", "", output)
output = re.sub(r"⠇ Loop \d+/\d+", "", output)
output = re.sub(r"⠏ Loop \d+/\d+", "", output)
spinner_chars = "[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]"
output = re.sub(rf"{spinner_chars}", "", output)
output = re.sub(
rf"{spinner_chars} Processing\.\.\.", "", output
)
output = re.sub(rf"{spinner_chars} Loop \d+/\d+", "", output)
# Remove any remaining log messages
output = re.sub(r"INFO.*?\|.*?\|.*?\|", "", output)
@ -98,44 +86,122 @@ class MarkdownOutputHandler:
output = re.sub(r"Refined content:", "", output)
# Ensure proper markdown formatting
if not output.strip().startswith("#"):
# If no headers, add some structure
lines = output.strip().split("\n")
if len(lines) > 0:
# Add a header for the first meaningful line
first_line = lines[0].strip()
if first_line and not first_line.startswith("**"):
output = f"## {first_line}\n\n" + "\n".join(
lines[1:]
)
lines = output.strip().split("\n")
if lines and not any(
line.strip().startswith("#") for line in lines[:3]
):
# Check if first line looks like a title (not already formatted)
first_line = lines[0].strip()
if (
first_line
and not first_line.startswith(
("**", "#", "-", "*", ">", "```")
)
and len(first_line) < 100 # Reasonable title length
and not first_line.endswith((",", ".", ":", ";"))
or first_line.endswith(":")
):
# Make it a header
output = f"## {first_line}\n\n" + "\n".join(lines[1:])
else:
# Keep original formatting
output = "\n".join(lines)
return output.strip()
def render_with_simple_syntax_highlighting(
self, content: str
) -> list:
"""Render content with simplified syntax highlighting for code blocks"""
# For now, let's just render everything as markdown to ensure it works
# We can add code block detection back later if needed
return [("markdown", content)]
"""Render content with syntax highlighting for code blocks.
Args:
content (str): The content to parse and highlight.
Returns:
list: List of tuples (type, content) where type is 'markdown' or 'code'.
"""
parts = []
current_pos = 0
# Pattern to match code blocks with optional language specifier
code_block_pattern = re.compile(
r"```(?P<lang>\w+)?\n(?P<code>.*?)\n```",
re.DOTALL | re.MULTILINE,
)
for match in code_block_pattern.finditer(content):
# Add markdown content before code block
if match.start() > current_pos:
markdown_content = content[
current_pos : match.start()
].strip()
if markdown_content:
parts.append(("markdown", markdown_content))
# Add code block
lang = match.group("lang") or "text"
code = match.group("code")
parts.append(("code", (lang, code)))
current_pos = match.end()
# Add remaining markdown content
if current_pos < len(content):
remaining = content[current_pos:].strip()
if remaining:
parts.append(("markdown", remaining))
# If no parts found, treat entire content as markdown
if not parts:
parts.append(("markdown", content))
return parts
def render_content_parts(self, parts: list) -> list:
"""Render different content parts with appropriate formatting"""
"""Render different content parts with appropriate formatting.
Args:
parts (list): List of tuples (type, content) to render.
Returns:
list: List of rendered Rich objects.
"""
rendered_parts = []
for part in parts:
if part[0] == "markdown":
for part_type, content in parts:
if part_type == "markdown":
# Render markdown
try:
md = Markdown(part[1])
md = Markdown(content, code_theme="monokai")
rendered_parts.append(md)
except Exception:
# Fallback to plain text
rendered_parts.append(Text(part[1]))
# Fallback to plain text with error indication
rendered_parts.append(
Text(content, style="white")
)
elif part[0] == "code":
# Code is already rendered as Syntax or Text object
rendered_parts.append(part[1])
elif part_type == "code":
# Render code with syntax highlighting
lang, code = content
try:
from rich.syntax import Syntax
syntax = Syntax(
code,
lang,
theme="monokai",
line_numbers=True,
word_wrap=True,
)
rendered_parts.append(syntax)
except Exception:
# Fallback to text with code styling
rendered_parts.append(
Text(
f"```{lang}\n{code}\n```",
style="white on grey23",
)
)
return rendered_parts
@ -145,7 +211,13 @@ class MarkdownOutputHandler:
title: str = "",
border_style: str = "blue",
):
"""Render content as markdown with syntax highlighting"""
"""Render content as markdown with syntax highlighting.
Args:
content (str): The markdown content to render.
title (str): Title for the panel.
border_style (str): Border style for the panel.
"""
if not content or content.strip() == "":
return
@ -165,22 +237,44 @@ class MarkdownOutputHandler:
# Create a group of rendered parts
from rich.console import Group
content_group = Group(*rendered_parts)
if rendered_parts:
content_group = Group(*rendered_parts)
self.console.print(
Panel(
content_group,
title=title,
border_style=border_style,
self.console.print(
Panel(
content_group,
title=title,
border_style=border_style,
padding=(1, 2),
expand=False,
)
)
)
except Exception:
# Fallback to plain text if rendering fails
else:
# No content to render
self.console.print(
Panel(
Text(
"No content to display",
style="dim italic",
),
title=title,
border_style="yellow",
)
)
except Exception as e:
# Fallback to plain text if rendering fails with better error info
error_msg = f"Markdown rendering error: {str(e)}"
self.console.print(
Panel(
cleaned_content,
title=title,
title=(
f"{title} [dim](fallback mode)[/dim]"
if title
else "Content (fallback mode)"
),
border_style="yellow",
subtitle=error_msg,
subtitle_align="left",
)
)

@ -6,7 +6,7 @@ from loguru import logger
# Import the functions to test (assuming they're in a module called mcp_client)
# from mcp_client import * # Replace with actual import
from swarms.tools.mcp_client_call import (
from swarms.tools.mcp_client_tools import (
MCPConnectionError,
MCPValidationError,
_create_server_tool_mapping_async,

@ -0,0 +1,129 @@
#!/usr/bin/env python3
"""Test script to verify the improved formatter markdown rendering."""
from swarms.utils.formatter import Formatter
def test_formatter():
"""Test the formatter with various markdown content."""
formatter = Formatter(md=True)
# Test 1: Basic markdown with headers
content1 = """# Main Title
This is a paragraph with **bold** text and *italic* text.
## Section 1
- Item 1
- Item 2
- Item 3
### Subsection
This is another paragraph with `inline code`.
"""
formatter.print_panel(
content1, title="Test 1: Basic Markdown", style="bold blue"
)
# Test 2: Code blocks with syntax highlighting
content2 = """## Code Examples
Here's a Python example:
```python
def hello_world():
'''A simple hello world function.'''
print("Hello, World!")
return True
```
And here's some JavaScript:
```javascript
function greet(name) {
console.log(`Hello, ${name}!`);
}
```
Plain text code block:
```
This is just plain text
without any syntax highlighting
```
"""
formatter.print_panel(
content2, title="Test 2: Code Blocks", style="bold green"
)
# Test 3: Mixed content
content3 = """## Mixed Content Test
This paragraph includes **various** formatting options:
- Lists with `code`
- Links [like this](https://example.com)
- And more...
```python
# Python code with comments
class Example:
def __init__(self, name):
self.name = name
def greet(self):
return f"Hello, {self.name}!"
```
### Table Example
| Column 1 | Column 2 | Column 3 |
|----------|----------|----------|
| Data 1 | Data 2 | Data 3 |
| Data 4 | Data 5 | Data 6 |
"""
formatter.print_panel(
content3, title="Test 3: Mixed Content", style="bold magenta"
)
# Test 4: Edge cases
content4 = """This content starts without a header
It should still be formatted correctly.
```
No language specified
```
Single line content."""
formatter.print_panel(
content4, title="Test 4: Edge Cases", style="bold yellow"
)
# Test 5: Empty content
formatter.print_panel(
"", title="Test 5: Empty Content", style="bold red"
)
# Test 6: Using print_markdown method
content6 = """# Direct Markdown Rendering
This uses the `print_markdown` method directly.
```python
# Syntax highlighted code
result = 42 * 2
print(f"The answer is {result}")
```
"""
formatter.print_markdown(
content6, title="Test 6: Direct Markdown", border_style="cyan"
)
if __name__ == "__main__":
test_formatter()