[DELETE Un-used files] [BUGF][Swarm Router + concurrent workflow issue]

pull/1152/head
Kye Gomez 4 days ago
parent 4a57c20aa8
commit 7a7b4a4c07

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "8.5.2"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez <kye@swarms.world>"]

@ -1,40 +0,0 @@
from typing import Callable
from swarms.schemas.agent_class_schema import AgentConfiguration
from swarms.tools.create_agent_tool import create_agent_tool
from swarms.prompts.agent_self_builder_prompt import (
generate_agent_system_prompt,
)
from swarms.tools.base_tool import BaseTool
from swarms.structs.agent import Agent
import json
def self_agent_builder(
    task: str,
) -> Callable:
    """Build an agent-creation tool for *task*.

    Converts the ``AgentConfiguration`` schema into a tool dictionary,
    asks a builder agent to produce an agent configuration for the
    task, and wraps that configuration into a callable via
    ``create_agent_tool``.
    """
    tool_schemas = [BaseTool().base_model_to_dict(AgentConfiguration)]
    print(json.dumps(tool_schemas, indent=4))

    builder = Agent(
        agent_name="Agent-Builder",
        agent_description="Autonomous agent builder",
        system_prompt=generate_agent_system_prompt(task),
        tools_list_dictionary=tool_schemas,
        output_type="final",
        max_loops=1,
        model_name="gpt-4o-mini",
    )

    configuration = builder.run(
        f"Create the agent configuration for the task: {task}"
    )
    print(configuration)
    print(type(configuration))

    return create_agent_tool(configuration)

@ -1,43 +0,0 @@
from swarms.structs.agent import Agent
# Run the agents in the registry
def run_agent_by_name(
    name: str,
    system_prompt: str,
    model_name: str,
    max_loops: int,
    task: str,
    img: str,
    *args,
    **kwargs,
):
    """
    Create an ``Agent`` and run a single task on it.

    Args:
        name (str): The name of the agent.
        system_prompt (str): The system prompt for the agent.
        model_name (str): The name of the model used by the agent.
        max_loops (int): The maximum number of loops the agent can run.
        task (str): The task to be run by the agent.
        img (str): Image input forwarded to the agent run.
        *args: Variable length arguments forwarded to ``Agent.run``.
        **kwargs: Keyword arguments forwarded to ``Agent.run``.

    Returns:
        The output of the task run by the agent, or ``None`` when any
        exception occurred (best-effort: the error is printed, not raised).
    """
    try:
        runner = Agent(
            agent_name=name,
            system_prompt=system_prompt,
            model_name=model_name,
            max_loops=max_loops,
        )
        return runner.run(task=task, img=img, *args, **kwargs)
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return None

@ -21,11 +21,4 @@ TREATMENT_PLAN_PROMPT = """
def analyze_xray_image(xray_analysis: str):
    """Build a treatment-plan prompt from an X-ray analysis.

    Args:
        xray_analysis (str): Textual X-ray analysis the treatment plan
            should be based on.

    Returns:
        str: A prompt asking for a detailed, actionable treatment plan.
    """
    # Bug fix: the original had two consecutive ``return`` statements;
    # the second (the intended condensed prompt) was unreachable dead
    # code. Keep only the intended prompt.
    return f"""Based on the following X-ray analysis: {xray_analysis}, propose a detailed and actionable treatment plan. Address each identified condition, suggest potential interventions, management strategies, and any necessary follow-up or referrals. This is a simulated exercise for educational purposes."""

@ -1,9 +1,3 @@
"""
This is a schema that enables the agent to generate itself.
"""
from pydantic import BaseModel, Field
from typing import Optional

@ -1,71 +0,0 @@
from datetime import datetime
from typing import Any, List, Optional
from pydantic import BaseModel, Field
class Usage(BaseModel):
    """Token usage statistics for a single agent completion."""

    # All counters are optional so partially-populated provider
    # responses can still validate.
    prompt_tokens: Optional[int] = Field(
        default=None,
        description="Number of tokens used in the prompt",
    )
    completion_tokens: Optional[int] = Field(
        default=None,
        description="Number of tokens used in the completion",
    )
    total_tokens: Optional[int] = Field(
        default=None, description="Total number of tokens used"
    )
class ModelConfig(BaseModel):
    """Generation settings used to produce a completion."""

    model_name: Optional[str] = Field(
        default=None,
        description="Name of the model used for generation",
    )
    temperature: Optional[float] = Field(
        default=None,
        description="Temperature setting used for generation",
    )
    top_p: Optional[float] = Field(
        default=None, description="Top-p setting used for generation"
    )
    max_tokens: Optional[int] = Field(
        default=None,
        description="Maximum number of tokens to generate",
    )
    frequency_penalty: Optional[float] = Field(
        default=None,
        description="Frequency penalty used for generation",
    )
    presence_penalty: Optional[float] = Field(
        default=None,
        description="Presence penalty used for generation",
    )
class AgentCompletionResponse(BaseModel):
    """Structured record of a single agent completion (outputs + usage)."""

    id: Optional[str] = Field(
        default=None, description="Unique identifier for the response"
    )
    agent_name: Optional[str] = Field(
        default=None,
        description="Name of the agent that generated the response",
    )
    agent_description: Optional[str] = Field(
        default=None, description="Description of the agent"
    )
    outputs: Optional[List[Any]] = Field(
        default=None,
        description="List of outputs generated by the agent",
    )
    usage: Optional[Usage] = Field(
        default=None, description="Token usage statistics"
    )
    # NOTE(review): "model_config" is a reserved class attribute in
    # pydantic v2; declaring it as a field only works on pydantic v1 —
    # confirm the pinned pydantic version before relying on this field.
    model_config: Optional[ModelConfig] = Field(
        default=None, description="Model configuration"
    )
    # default_factory stamps creation time at instantiation.
    timestamp: Optional[str] = Field(
        default_factory=lambda: datetime.now().isoformat(),
        description="Timestamp of when the response was generated",
    )

@ -1,7 +0,0 @@
from pydantic import BaseModel
class AgentRAGConfig(BaseModel):
    """
    Configuration for the AgentRAG class.

    NOTE(review): no fields are declared in the visible source — this
    looks like a stub schema; confirm intended fields before use.
    """

@ -1,13 +0,0 @@
from pydantic import BaseModel
from typing import List, Dict, Any, Optional, Callable
from swarms.schemas.mcp_schemas import MCPConnection
class AgentToolTypes(BaseModel):
    """Container for the tool-related inputs an agent can carry."""

    # JSON-schema style dictionaries describing callable tools.
    tool_schema: List[Dict[str, Any]]
    # MCP server connection details (project type).
    mcp_connection: MCPConnection
    # Optional pydantic model describing a tool's parameters.
    tool_model: Optional[BaseModel]
    # Optional plain-Python callables exposed as tools.
    tool_functions: Optional[List[Callable]]

    class Config:
        # Needed so non-pydantic types (e.g. Callable) are accepted as
        # field values.
        arbitrary_types_allowed = True

@ -1,38 +0,0 @@
from pydantic import BaseModel
from swarms.tools.base_tool import BaseTool, Field
agents = []  # NOTE(review): appears unused in this module — confirm before removing.
class ConversationEntry(BaseModel):
    """A single message posted to a group chat."""

    agent_name: str = Field(
        description="The name of the agent who made the entry."
    )
    message: str = Field(description="The message sent by the agent.")
class LeaveConversation(BaseModel):
    """Signal that an agent has left the conversation."""

    agent_name: str = Field(
        description="The name of the agent who left the conversation."
    )
class JoinGroupChat(BaseModel):
    """Signal that an agent joined a group chat, with its opening message."""

    agent_name: str = Field(
        description="The name of the agent who joined the conversation."
    )
    group_chat_name: str = Field(
        description="The name of the group chat."
    )
    initial_message: str = Field(
        description="The initial message sent by the agent."
    )
# Tool-call dictionaries derived from the schemas above; kept at module
# level so other modules can import them.
conversation_entry = BaseTool().base_model_to_dict(ConversationEntry)
leave_conversation = BaseTool().base_model_to_dict(LeaveConversation)
join_group_chat = BaseTool().base_model_to_dict(JoinGroupChat)

# Bug fix: the unconditional prints polluted stdout for every importer
# of this module; only print when executed directly as a script.
if __name__ == "__main__":
    print(conversation_entry)
    print(leave_conversation)
    print(join_group_chat)

@ -1,110 +0,0 @@
from pydantic import BaseModel, Field
from typing import Optional
# from litellm.types import (
# ChatCompletionPredictionContentParam,
# )
# class LLMCompletionRequest(BaseModel):
# """Schema for LLM completion request parameters."""
# model: Optional[str] = Field(
# default=None,
# description="The name of the language model to use for text completion",
# )
# temperature: Optional[float] = Field(
# default=0.5,
# description="Controls randomness of the output (0.0 to 1.0)",
# )
# top_p: Optional[float] = Field(
# default=None,
# description="Controls diversity via nucleus sampling",
# )
# n: Optional[int] = Field(
# default=None, description="Number of completions to generate"
# )
# stream: Optional[bool] = Field(
# default=None, description="Whether to stream the response"
# )
# stream_options: Optional[dict] = Field(
# default=None, description="Options for streaming response"
# )
# stop: Optional[Any] = Field(
# default=None,
# description="Up to 4 sequences where the API will stop generating",
# )
# max_completion_tokens: Optional[int] = Field(
# default=None,
# description="Maximum tokens for completion including reasoning",
# )
# max_tokens: Optional[int] = Field(
# default=None,
# description="Maximum tokens in generated completion",
# )
# prediction: Optional[ChatCompletionPredictionContentParam] = (
# Field(
# default=None,
# description="Configuration for predicted output",
# )
# )
# presence_penalty: Optional[float] = Field(
# default=None,
# description="Penalizes new tokens based on existence in text",
# )
# frequency_penalty: Optional[float] = Field(
# default=None,
# description="Penalizes new tokens based on frequency in text",
# )
# logit_bias: Optional[dict] = Field(
# default=None,
# description="Modifies probability of specific tokens",
# )
# reasoning_effort: Optional[Literal["low", "medium", "high"]] = (
# Field(
# default=None,
# description="Level of reasoning effort for the model",
# )
# )
# seed: Optional[int] = Field(
# default=None, description="Random seed for reproducibility"
# )
# tools: Optional[List] = Field(
# default=None,
# description="List of tools available to the model",
# )
# tool_choice: Optional[Union[str, dict]] = Field(
# default=None, description="Choice of tool to use"
# )
# logprobs: Optional[bool] = Field(
# default=None,
# description="Whether to return log probabilities",
# )
# top_logprobs: Optional[int] = Field(
# default=None,
# description="Number of most likely tokens to return",
# )
# parallel_tool_calls: Optional[bool] = Field(
# default=None,
# description="Whether to allow parallel tool calls",
# )
# class Config:
# allow_arbitrary_types = True
class ModelConfigOrigin(BaseModel):
    """Schema for model configuration origin (where a model is served from)."""

    model_url: Optional[str] = Field(
        default=None,
        description="The URL of the model to use for text completion",
    )
    api_key: Optional[str] = Field(
        default=None,
        description="The API key to use for the model",
    )

    class Config:
        # Bug fix: the pydantic config option is named
        # ``arbitrary_types_allowed`` (as used elsewhere in this
        # codebase); ``allow_arbitrary_types`` is not a recognized key
        # and was silently ignored.
        arbitrary_types_allowed = True

@ -31,27 +31,3 @@ class Tool(BaseModel):
class ToolSet(BaseModel):
    """A collection of ``Tool`` definitions."""

    tools: List[Tool]
# model = ToolSet(
# tools=[
# Tool(
# type="function",
# function=FunctionDefinition(
# name="test",
# description="test",
# parameters=ParameterSchema(
# type="object",
# properties={
# "weather_tool": PropertySchema(
# type="string",
# description="Get the weather in a given location",
# )
# },
# required=["weather_tool"],
# ),
# ),
# ),
# ]
# )
# print(model.model_dump_json(indent=4))

@ -1,816 +0,0 @@
"""
Bell Labs Research Simulation with Physicist Agents
This simulation creates specialized AI agents representing famous physicists
from the Bell Labs era, including Oppenheimer, von Neumann, Feynman, Einstein,
and others. The agents work together in a collaborative research environment
following a structured workflow: task -> Oppenheimer (planning) -> physicist discussion
-> code implementation -> results analysis -> repeat for n loops.
"""
from functools import lru_cache
from typing import Any, Dict, List, Optional
from loguru import logger
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
# from examples.tools.claude_as_a_tool import developer_worker_agent
@lru_cache(maxsize=1)
def _create_physicist_agents(
    model_name: str, random_model_name: bool = False
) -> List[Agent]:
    """
    Create specialized agents for each physicist.

    Args:
        model_name: Model to use for all agents
        random_model_name: Forwarded to each ``Agent``; when True the
            agent may pick a random model instead of ``model_name``.

    Returns:
        List of configured physicist agents

    NOTE(review): ``lru_cache`` memoizes the returned list, so every
    swarm constructed with the same arguments shares the *same* mutable
    ``Agent`` instances (and any state they accumulate) — confirm this
    sharing is intended.
    """
    # Static roster: display name -> role/expertise/background/system prompt.
    # NOTE(review): only "system_prompt" is attached to the Agent below;
    # "role", "expertise" and "background" are not stored on the Agent.
    physicists_data = {
        "J. Robert Oppenheimer": {
            "role": "Research Director & Theoretical Physicist",
            "expertise": [
                "Nuclear physics",
                "Quantum mechanics",
                "Research coordination",
                "Strategic planning",
                "Team leadership",
            ],
            "background": "Director of the Manhattan Project, expert in quantum mechanics and nuclear physics",
            "system_prompt": """You are J. Robert Oppenheimer, the brilliant theoretical physicist and research director.
Your role is to:
1. Analyze complex research questions and break them down into manageable components
2. Create comprehensive research plans with clear objectives and methodologies
3. Coordinate the research team and ensure effective collaboration
4. Synthesize findings from different physicists into coherent conclusions
5. Guide the research process with strategic insights and theoretical frameworks
You excel at:
- Identifying the core theoretical challenges in any research question
- Designing experimental approaches that test fundamental principles
- Balancing theoretical rigor with practical implementation
- Fostering interdisciplinary collaboration between specialists
- Maintaining focus on the most promising research directions
When creating research plans, be thorough, systematic, and consider multiple approaches.
Always emphasize the theoretical foundations and experimental validation of any proposed solution.""",
        },
        "John von Neumann": {
            "role": "Mathematical Physicist & Computer Scientist",
            "expertise": [
                "Mathematical physics",
                "Computer architecture",
                "Game theory",
                "Quantum mechanics",
                "Numerical methods",
            ],
            "background": "Pioneer of computer science, game theory, and mathematical physics",
            "system_prompt": """You are John von Neumann, the brilliant mathematical physicist and computer scientist.
Your approach to research questions involves:
1. Mathematical rigor and formal mathematical frameworks
2. Computational and algorithmic solutions to complex problems
3. Game theory and strategic analysis of research approaches
4. Numerical methods and computational physics
5. Bridging abstract theory with practical implementation
You excel at:
- Formulating problems in precise mathematical terms
- Developing computational algorithms and numerical methods
- Applying game theory to optimize research strategies
- Creating mathematical models that capture complex phenomena
- Designing efficient computational approaches to physical problems
When analyzing research questions, focus on mathematical foundations, computational feasibility,
and the development of rigorous theoretical frameworks that can be implemented and tested.""",
        },
        "Richard Feynman": {
            "role": "Theoretical Physicist & Problem Solver",
            "expertise": [
                "Quantum electrodynamics",
                "Particle physics",
                "Problem-solving methodology",
                "Intuitive physics",
                "Experimental design",
            ],
            "background": "Nobel laureate in physics, known for intuitive problem-solving and quantum electrodynamics",
            "system_prompt": """You are Richard Feynman, the brilliant theoretical physicist and master problem solver.
Your research methodology involves:
1. Intuitive understanding of complex physical phenomena
2. Creative problem-solving approaches that cut through complexity
3. Experimental design that tests fundamental principles
4. Clear communication of complex ideas through analogies and examples
5. Focus on the most essential aspects of any research question
You excel at:
- Finding elegant solutions to seemingly intractable problems
- Designing experiments that reveal fundamental truths
- Communicating complex physics in accessible terms
- Identifying the core physics behind any phenomenon
- Developing intuitive models that capture essential behavior
When approaching research questions, look for the simplest, most elegant solutions.
Focus on the fundamental physics and design experiments that test your understanding directly.""",
        },
        "Albert Einstein": {
            "role": "Theoretical Physicist & Conceptual Innovator",
            "expertise": [
                "Relativity theory",
                "Quantum mechanics",
                "Conceptual physics",
                "Thought experiments",
                "Fundamental principles",
            ],
            "background": "Revolutionary physicist who developed relativity theory and influenced quantum mechanics",
            "system_prompt": """You are Albert Einstein, the revolutionary theoretical physicist and conceptual innovator.
Your research approach involves:
1. Deep conceptual thinking about fundamental physical principles
2. Thought experiments that reveal the essence of physical phenomena
3. Questioning established assumptions and exploring new paradigms
4. Focus on the most fundamental and universal aspects of physics
5. Intuitive understanding of space, time, and the nature of reality
You excel at:
- Identifying the conceptual foundations of any physical theory
- Developing thought experiments that challenge conventional wisdom
- Finding elegant mathematical descriptions of physical reality
- Questioning fundamental assumptions and exploring alternatives
- Developing unified theories that explain diverse phenomena
When analyzing research questions, focus on the conceptual foundations and fundamental principles.
Look for elegant, unified explanations and be willing to challenge established paradigms.""",
        },
        "Enrico Fermi": {
            "role": "Experimental Physicist & Nuclear Scientist",
            "expertise": [
                "Nuclear physics",
                "Experimental physics",
                "Neutron physics",
                "Statistical physics",
                "Practical applications",
            ],
            "background": "Nobel laureate known for nuclear physics, experimental work, and the first nuclear reactor",
            "system_prompt": """You are Enrico Fermi, the brilliant experimental physicist and nuclear scientist.
Your research methodology involves:
1. Rigorous experimental design and execution
2. Practical application of theoretical principles
3. Statistical analysis and probability in physics
4. Nuclear physics and particle interactions
5. Bridging theory with experimental validation
You excel at:
- Designing experiments that test theoretical predictions
- Applying statistical methods to physical problems
- Developing practical applications of fundamental physics
- Nuclear physics and particle physics experiments
- Creating experimental setups that reveal new phenomena
When approaching research questions, focus on experimental design and practical implementation.
Emphasize the importance of experimental validation and statistical analysis in physics research.""",
        },
        "Code-Implementer": {
            "role": "Computational Physicist & Code Developer",
            "expertise": [
                "Scientific computing",
                "Physics simulations",
                "Data analysis",
                "Algorithm implementation",
                "Numerical methods",
            ],
            "background": "Specialized in implementing computational solutions to physics problems",
            "system_prompt": """You are a specialized computational physicist and code developer.
Your responsibilities include:
1. Implementing computational solutions to physics problems
2. Developing simulations and numerical methods
3. Analyzing data and presenting results clearly
4. Testing theoretical predictions through computation
5. Providing quantitative analysis of research findings
You excel at:
- Writing clear, efficient scientific code
- Implementing numerical algorithms for physics problems
- Data analysis and visualization
- Computational optimization and performance
- Bridging theoretical physics with computational implementation
When implementing solutions, focus on:
- Clear, well-documented code
- Efficient numerical algorithms
- Comprehensive testing and validation
- Clear presentation of results and analysis
- Quantitative assessment of theoretical predictions""",
        },
    }

    # Build one single-loop Agent per roster entry.
    agents = []
    for name, data in physicists_data.items():
        agent = Agent(
            agent_name=name,
            system_prompt=data["system_prompt"],
            model_name=model_name,
            random_model_name=random_model_name,
            max_loops=1,
            dynamic_temperature_enabled=True,
            dynamic_context_window=True,
        )
        agents.append(agent)
    return agents
class BellLabsSwarm:
"""
Bell Labs Research Simulation Swarm
Simulates the collaborative research environment of Bell Labs with famous physicists
working together on complex research questions. The workflow follows:
1. Task is presented to the team
2. Oppenheimer creates a research plan
3. Physicists discuss and vote on approaches using majority voting
4. Code implementation agent tests the theory
5. Results are analyzed and fed back to the team
6. Process repeats for n loops with iterative refinement
"""
def __init__(
    self,
    name: str = "Bell Labs Research Team",
    description: str = "A collaborative research environment simulating Bell Labs physicists",
    max_loops: int = 1,
    verbose: bool = True,
    model_name: str = "gpt-4o-mini",
    random_model_name: bool = False,
    output_type: str = "str-all-except-first",
    dynamic_context_window: bool = True,
    **kwargs,
):
    """
    Initialize the Bell Labs Research Swarm.

    Args:
        name: Name of the swarm
        description: Description of the swarm's purpose
        max_loops: Number of research iteration loops
        verbose: Whether to enable verbose logging
        model_name: Model to use for all agents
        random_model_name: Forwarded to agent creation; lets agents
            pick a random model instead of ``model_name``.
        output_type: Requested output formatting mode (stored on the
            instance; NOTE(review): ``run`` currently formats as "str"
            regardless — confirm intent).
        dynamic_context_window: Enables dynamic context windowing on
            the shared ``Conversation``.
        **kwargs: Additional arguments passed to BaseSwarm
    """
    self.name = name
    self.description = description
    self.max_loops = max_loops
    self.verbose = verbose
    self.model_name = model_name
    self.kwargs = kwargs
    self.random_model_name = random_model_name
    self.output_type = output_type
    self.dynamic_context_window = dynamic_context_window

    # Shared conversation log for the whole research session.
    self.conversation = Conversation(
        dynamic_context_window=dynamic_context_window
    )

    # Create the physicist agents (cached helper — see its docstring).
    self.agents = _create_physicist_agents(
        model_name=model_name, random_model_name=random_model_name
    )

    # Set up specialized agents by their roster names.
    self.oppenheimer = self._get_agent_by_name(
        "J. Robert Oppenheimer"
    )
    self.code_implementer = self._get_agent_by_name(
        "Code-Implementer"
    )
    # Discussion participants: everyone except the director and the
    # code implementer.
    self.physicists = [
        agent
        for agent in self.agents
        if agent.agent_name != "J. Robert Oppenheimer"
        and agent.agent_name != "Code-Implementer"
    ]

    # # Find the code implementer agent
    # code_implementer = self._get_agent_by_name("Code-Implementer")
    # code_implementer.tools = [developer_worker_agent]

    logger.info(
        f"Bell Labs Research Team initialized with {len(self.agents)} agents"
    )
def _get_agent_by_name(self, name: str) -> Optional[Agent]:
    """Return the first agent whose ``agent_name`` equals *name*, else ``None``."""
    return next(
        (candidate for candidate in self.agents if candidate.agent_name == name),
        None,
    )
def run(
    self, task: str, img: Optional[str] = None
) -> Dict[str, Any]:
    """
    Run the Bell Labs research simulation.

    Workflow: record the task -> Oppenheimer plans -> physicists
    discuss -> code implementer builds/tests -> formatted history is
    returned.

    Args:
        task: The research question or task to investigate
        img: Optional image input forwarded to Oppenheimer's first run.

    Returns:
        Dictionary containing the research results, process history, and full conversation

    NOTE(review): the body returns ``history_output_formatter(...,
    type="str")`` — likely a string, not a dict, and ``self.max_loops``
    is never consulted here (single pass) — confirm both.
    """
    logger.info(f"Starting Bell Labs research on: {task}")

    # Add initial task to conversation history
    self.conversation.add(
        "Research Coordinator", f"Initial Research Task: {task}"
    )

    # Oppenheimer drafts the research plan from the full history so far.
    oppenheimer_plan = self.oppenheimer.run(
        task=self.conversation.get_str(), img=img
    )
    self.conversation.add(
        self.oppenheimer.agent_name,
        f"Research Plan: {oppenheimer_plan}",
    )

    # Physicists discuss the plan in randomized order.
    physicist_discussion = self._conduct_physicist_discussion(
        task, self.conversation.get_str()
    )
    # Add to conversation history
    self.conversation.add(
        "Group Discussion", physicist_discussion
    )

    # Now implement the solution
    implementation_results = self._implement_and_test_solution(
        history=self.conversation.get_str()
    )
    # Add to conversation history
    self.conversation.add(
        self.code_implementer.agent_name, implementation_results
    )

    return history_output_formatter(
        conversation=self.conversation, type="str"
    )
def _create_research_plan(
    self, task: str, loop_number: int
) -> str:
    """
    Have Oppenheimer create a research plan.

    Args:
        task: Research task
        loop_number: Current loop number (0-based; shown 1-based)

    Returns:
        Research plan from Oppenheimer

    NOTE(review): not called from the visible ``run`` path — appears to
    support a multi-loop workflow; confirm before removing.
    """
    prompt = f"""
Research Task: {task}
Loop Number: {loop_number + 1}
As J. Robert Oppenheimer, create a comprehensive research plan for this task.
Your plan should include:
1. Clear research objectives and hypotheses
2. Theoretical framework and approach
3. Specific research questions to investigate
4. Methodology for testing and validation
5. Expected outcomes and success criteria
6. Timeline and milestones
7. Resource requirements and team coordination
Provide a detailed, actionable plan that the research team can follow.
"""
    plan = self.oppenheimer.run(prompt)
    return plan
def _conduct_physicist_discussion(
    self, task: str, history: str
) -> str:
    """
    Conduct a natural discussion among physicists where they build on each other's ideas.

    Args:
        task: Research task
        history: Conversation history including Oppenheimer's plan

    Returns:
        Results of the physicist discussion as a conversation transcript,
        followed by an Oppenheimer-written summary.
    """
    import random

    # Shuffle the physicists to create random discussion order
    discussion_order = self.physicists.copy()
    random.shuffle(discussion_order)

    discussion_transcript = []
    current_context = (
        f"{history}\n\nCurrent Research Task: {task}\n\n"
    )

    # Each physicist contributes to the discussion, building on previous contributions
    for i, physicist in enumerate(discussion_order):
        if i == 0:
            # First physicist starts the discussion
            discussion_prompt = f"""
{current_context}
As {physicist.agent_name}, you are starting the group discussion about this research plan.
Based on your expertise, provide your initial thoughts on:
1. What aspects of Oppenheimer's research plan do you find most promising?
2. What theoretical challenges or concerns do you see?
3. What specific approaches would you recommend based on your expertise?
4. What questions or clarifications do you have for the team?
Be specific and draw from your unique perspective and expertise. This will set the tone for the group discussion.
"""
        else:
            # Subsequent physicists build on the discussion
            previous_contributions = "\n\n".join(
                discussion_transcript
            )
            # Bug fix: Agent instances built in _create_physicist_agents
            # are never given an ``expertise`` attribute (it lives only
            # in that helper's local dict), so the original
            # ``physicist.expertise`` raised AttributeError here. Fall
            # back gracefully when the attribute is absent.
            expertise = getattr(physicist, "expertise", None)
            expertise_note = (
                f" in {', '.join(expertise)}" if expertise else ""
            )
            discussion_prompt = f"""
{current_context}
Previous Discussion:
{previous_contributions}
As {physicist.agent_name}, continue the group discussion by building on your colleagues' ideas.
Consider:
1. How do your colleagues' perspectives relate to your expertise{expertise_note}?
2. What additional insights can you add to the discussion?
3. How can you address any concerns or questions raised by others?
4. What specific next steps would you recommend based on the discussion so far?
Engage directly with your colleagues' ideas and contribute your unique perspective to move the research forward.
"""
        # Get the physicist's contribution
        contribution = physicist.run(discussion_prompt)

        # Add to transcript with clear attribution
        discussion_transcript.append(
            f"{physicist.agent_name}: {contribution}"
        )

        # Update context for next iteration
        current_context = (
            f"{history}\n\nCurrent Research Task: {task}\n\nGroup Discussion:\n"
            + "\n\n".join(discussion_transcript)
        )

    # Create a summary of the discussion
    summary_prompt = f"""
Research Task: {task}
Complete Discussion Transcript:
{chr(10).join(discussion_transcript)}
As a research coordinator, provide a concise summary of the key points from this group discussion:
1. Main areas of agreement among the physicists
2. Key concerns or challenges identified
3. Specific recommendations made by the team
4. Next steps for moving forward with the research
Focus on actionable insights and clear next steps that the team can implement.
"""
    # Use Oppenheimer to summarize the discussion
    discussion_summary = self.oppenheimer.run(summary_prompt)

    # Return the full discussion transcript with summary
    full_discussion = f"Group Discussion Transcript:\n\n{chr(10).join(discussion_transcript)}\n\n---\nDiscussion Summary:\n{discussion_summary}"
    return full_discussion
def _implement_and_test_solution(
    self,
    history: str,
) -> Dict[str, Any]:
    """
    Implement and test the proposed solution.

    Args:
        history: Full conversation history (task, plan, discussion)
            used as context for the implementation prompt.

    Returns:
        Implementation and testing results

    NOTE(review): despite the annotation, this returns whatever
    ``Agent.run`` produces (typically a string) — confirm the type.
    The previous docstring documented parameters (task, plan,
    discussion_results, loop_number) that do not exist.
    """
    implementation_prompt = f"""
{history}
As the Code Implementer, your task is to:
1. Implement a computational solution based on the research plan
2. Test the theoretical predictions through simulation or calculation
3. Analyze the results and provide quantitative assessment
4. Identify any discrepancies between theory and implementation
5. Suggest improvements or next steps
Provide:
- Clear description of your implementation approach
- Code or algorithm description
- Test results and analysis
- Comparison with theoretical predictions
- Recommendations for further investigation
Focus on practical implementation and quantitative results.
"""
    implementation_results = self.code_implementer.run(
        implementation_prompt
    )
    return implementation_results
def _analyze_results(
    self, implementation_results: Dict[str, Any], loop_number: int
) -> str:
    """
    Analyze the results and provide team review.

    Args:
        implementation_results: Results from implementation phase
        loop_number: Current loop number (0-based; shown 1-based)

    Returns:
        Analysis and recommendations

    NOTE(review): not called from the visible ``run`` path — appears to
    support a multi-loop workflow; confirm before removing.
    """
    analysis_prompt = f"""
Implementation Results: {implementation_results}
Loop Number: {loop_number + 1}
As the research team, analyze these results and provide:
1. Assessment of whether the implementation supports the theoretical predictions
2. Identification of any unexpected findings or discrepancies
3. Evaluation of the methodology and approach
4. Recommendations for the next research iteration
5. Insights gained from this round of investigation
Consider:
- What worked well in this approach?
- What challenges or limitations were encountered?
- How can the research be improved in the next iteration?
- What new questions or directions have emerged?
Provide a comprehensive analysis that will guide the next research phase.
"""
    # Use team discussion for results analysis
    analysis_results = self._conduct_team_analysis(
        analysis_prompt
    )
    return analysis_results
def _conduct_team_analysis(self, analysis_prompt: str) -> str:
    """
    Conduct a team analysis discussion using the same approach as physicist discussion.

    All agents (including Oppenheimer and the implementer) contribute in
    random order, each seeing the prior contributions; Oppenheimer then
    summarizes.

    Args:
        analysis_prompt: The prompt for the analysis

    Returns:
        Results of the team analysis discussion (transcript + summary)
    """
    import random

    # Shuffle the agents to create random discussion order
    discussion_order = self.agents.copy()
    random.shuffle(discussion_order)

    discussion_transcript = []
    current_context = analysis_prompt

    # Each agent contributes to the analysis, building on previous contributions
    for i, agent in enumerate(discussion_order):
        if i == 0:
            # First agent starts the analysis
            agent_prompt = f"""
{current_context}
As {agent.agent_name}, you are starting the team analysis discussion.
Based on your expertise and role, provide your initial analysis of the implementation results.
Focus on what you can contribute from your unique perspective.
"""
        else:
            # Subsequent agents build on the analysis
            previous_contributions = "\n\n".join(
                discussion_transcript
            )
            agent_prompt = f"""
{current_context}
Previous Analysis:
{previous_contributions}
As {agent.agent_name}, continue the team analysis by building on your colleagues' insights.
Consider:
1. How do your colleagues' perspectives relate to your expertise?
2. What additional insights can you add to the analysis?
3. How can you address any concerns or questions raised by others?
4. What specific recommendations would you make based on the analysis so far?
Engage directly with your colleagues' ideas and contribute your unique perspective.
"""
        # Get the agent's contribution
        contribution = agent.run(agent_prompt)

        # Add to transcript with clear attribution
        discussion_transcript.append(
            f"{agent.agent_name}: {contribution}"
        )

        # Update context for next iteration
        current_context = (
            f"{analysis_prompt}\n\nTeam Analysis:\n"
            + "\n\n".join(discussion_transcript)
        )

    # Create a summary of the analysis
    summary_prompt = f"""
Analysis Prompt: {analysis_prompt}
Complete Analysis Transcript:
{chr(10).join(discussion_transcript)}
As a research coordinator, provide a concise summary of the key points from this team analysis:
1. Main findings and insights from the team
2. Key recommendations made
3. Areas of agreement and disagreement
4. Next steps for the research
Focus on actionable insights and clear next steps.
"""
    # Use Oppenheimer to summarize the analysis
    analysis_summary = self.oppenheimer.run(summary_prompt)

    # Return the full analysis transcript with summary
    full_analysis = f"Team Analysis Transcript:\n\n{chr(10).join(discussion_transcript)}\n\n---\nAnalysis Summary:\n{analysis_summary}"
    return full_analysis
def _refine_task_for_next_iteration(
    self, current_task: str, loop_results: Dict[str, Any]
) -> str:
    """
    Produce the research task for the next loop.

    Args:
        current_task: The task that was investigated in the loop that
            just finished.
        loop_results: The results gathered during that loop.

    Returns:
        The refined task text produced by the coordinator agent.
    """
    prompt = f"""
Current Research Task: {current_task}
Results from Current Loop: {loop_results}
Based on the findings and analysis from this research loop, refine the research task for the next iteration.
Consider:
- What new questions have emerged?
- What aspects need deeper investigation?
- What alternative approaches should be explored?
- What specific hypotheses should be tested?
Provide a refined, focused research question that builds upon the current findings
and addresses the most important next steps identified by the team.
"""
    # Oppenheimer acts as the coordinator who decides the next direction.
    next_task = self.oppenheimer.run(prompt)

    # Record the refinement so it shows up in the shared conversation log.
    self.conversation.add(
        "J. Robert Oppenheimer",
        f"Task Refined for Next Iteration: {next_task}",
    )
    return next_task
def _generate_final_conclusion(
    self, research_results: Dict[str, Any]
) -> str:
    """
    Ask the coordinator agent for a final, project-wide conclusion.

    Args:
        research_results: Complete research results accumulated across
            all loops.

    Returns:
        The conclusion text produced by the Oppenheimer agent.
    """
    prompt = f"""
Complete Research Results: {research_results}
As J. Robert Oppenheimer, provide a comprehensive final conclusion for this research project.
Your conclusion should:
1. Summarize the key findings from all research loops
2. Identify the most significant discoveries or insights
3. Evaluate the success of the research approach
4. Highlight any limitations or areas for future investigation
5. Provide a clear statement of what was accomplished
6. Suggest next steps for continued research
Synthesize the work of the entire team and provide a coherent narrative
of the research journey and its outcomes.
"""
    # A single coordinator pass; no conversation-log side effects here.
    return self.oppenheimer.run(prompt)
# Example usage function
def run_bell_labs_research(
    research_question: str,
    max_loops: int = 3,
    model_name: str = "gpt-4o-mini",
    verbose: bool = True,
) -> Dict[str, Any]:
    """
    Run a Bell Labs research simulation.

    Args:
        research_question: The research question to investigate.
        max_loops: Number of research iteration loops.
        model_name: Model to use for all agents.
        verbose: Whether to enable verbose logging.

    Returns:
        Complete research results and findings.
    """
    # Build the swarm, run the single research question, hand back the
    # results dictionary unchanged.
    swarm = BellLabsSwarm(
        max_loops=max_loops,
        verbose=verbose,
        model_name=model_name,
    )
    return swarm.run(research_question)
# if __name__ == "__main__":
# # Example research question
# research_question = """
# Investigate the feasibility of quantum computing for solving complex optimization problems.
# Consider both theoretical foundations and practical implementation challenges.
# """
# print("Starting Bell Labs Research Simulation...")
# print(f"Research Question: {research_question}")
# print("-" * 80)
# results = run_bell_labs_research(
# research_question=research_question,
# max_loops=2,
# verbose=True
# )
# print("\n" + "=" * 80)
# print("RESEARCH SIMULATION COMPLETED")
# print("=" * 80)
# print(f"\nFinal Conclusion:\n{results['final_conclusion']}")
# print(f"\nResearch completed in {len(results['research_history'])} loops.")
# print("Check the results dictionary for complete research details.")

@ -1,253 +0,0 @@
from swarms.structs.agent import Agent
from typing import List
from swarms.structs.conversation import Conversation
import uuid
import random
from loguru import logger
from typing import Optional
class QASwarm:
    """
    A Question and Answer swarm system where random agents ask questions to speaker agents.

    This system allows for dynamic Q&A sessions where:
    - Multiple agents can act as questioners
    - One or multiple agents can act as speakers/responders
    - Questions are asked randomly by different agents
    - The conversation is tracked and managed
    - Agents are showcased to each other with detailed information
    """

    def __init__(
        self,
        name: str = "QandA",
        description: str = "Question and Answer Swarm System",
        agents: List[Agent] = None,
        speaker_agents: List[Agent] = None,
        id: Optional[str] = None,
        max_loops: int = 5,
        show_dashboard: bool = True,
        speaker_agent: Agent = None,
        showcase_agents: bool = True,
        **kwargs,
    ):
        """
        Initialize the Q&A swarm.

        Args:
            name: Human-readable swarm name.
            description: Short description of the swarm.
            agents: Questioner agents.
            speaker_agents: Pool of speaker agents; one is picked at random
                per question unless ``speaker_agent`` is set.
            id: Unique identifier; a fresh UUID is generated per instance
                when omitted.
            max_loops: Stored loop budget (not enforced by this class).
            show_dashboard: Stored flag (not used by this class directly).
            speaker_agent: Single fixed speaker agent; takes precedence over
                the ``speaker_agents`` pool.
            showcase_agents: Whether agents are introduced to each other
                before questions and answers are generated.
        """
        # BUG FIX: the previous default (`id: str = str(uuid.uuid4())`) was
        # evaluated once at class-definition time, so every instance created
        # without an explicit id shared the SAME identifier. Generate a fresh
        # UUID per instance instead; passing id explicitly behaves as before.
        self.id = id if id is not None else str(uuid.uuid4())
        self.name = name
        self.description = description
        self.max_loops = max_loops
        self.show_dashboard = show_dashboard
        self.agents = agents or []
        self.speaker_agents = speaker_agents or []
        self.kwargs = kwargs
        self.speaker_agent = speaker_agent
        self.showcase_agents = showcase_agents
        self.conversation = Conversation()

        # Validate setup
        self._validate_setup()

    def _validate_setup(self):
        """Validate that the Q&A system is properly configured."""
        if not self.agents:
            logger.warning(
                "No questioner agents provided. Add agents using add_agent() method."
            )
        if not self.speaker_agents and not self.speaker_agent:
            logger.warning(
                "No speaker agents provided. Add speaker agents using add_speaker_agent() method."
            )
        if (
            not self.agents
            and not self.speaker_agents
            and not self.speaker_agent
        ):
            raise ValueError(
                "At least one agent (questioner or speaker) must be provided."
            )

    def add_agent(self, agent: Agent):
        """Add a questioner agent to the swarm."""
        self.agents.append(agent)
        logger.info(f"Added questioner agent: {agent.agent_name}")

    def add_speaker_agent(self, agent: Agent):
        """Add a speaker agent to the swarm."""
        if self.speaker_agents is None:
            self.speaker_agents = []
        self.speaker_agents.append(agent)
        logger.info(f"Added speaker agent: {agent.agent_name}")

    def get_agent_info(self, agent: Agent) -> dict:
        """Extract key information about an agent for showcasing."""
        # getattr with defaults so agents missing optional attributes
        # still produce a usable showcase instead of raising.
        info = {
            "name": getattr(agent, "agent_name", "Unknown Agent"),
            "description": getattr(
                agent, "agent_description", "No description available"
            ),
            "role": getattr(agent, "role", "worker"),
        }

        # Get system prompt preview (first 50 characters)
        system_prompt = getattr(agent, "system_prompt", "")
        if system_prompt:
            info["system_prompt_preview"] = (
                system_prompt[:50] + "..."
                if len(system_prompt) > 50
                else system_prompt
            )
        else:
            info["system_prompt_preview"] = (
                "No system prompt available"
            )

        return info

    def showcase_speaker_to_questioner(
        self, questioner: Agent, speaker: Agent
    ) -> str:
        """Create a showcase prompt introducing the speaker agent to the questioner."""
        speaker_info = self.get_agent_info(speaker)

        showcase_prompt = f"""
You are about to ask a question to a specialized agent. Here's what you need to know about them:
**Speaker Agent Information:**
- **Name**: {speaker_info['name']}
- **Role**: {speaker_info['role']}
- **Description**: {speaker_info['description']}
- **System Prompt Preview**: {speaker_info['system_prompt_preview']}
Please craft a thoughtful, relevant question that takes into account this agent's expertise and background.
Your question should be specific and demonstrate that you understand their role and capabilities.
"""
        return showcase_prompt

    def showcase_questioner_to_speaker(
        self, speaker: Agent, questioner: Agent
    ) -> str:
        """Create a showcase prompt introducing the questioner agent to the speaker."""
        questioner_info = self.get_agent_info(questioner)

        showcase_prompt = f"""
You are about to answer a question from another agent. Here's what you need to know about them:
**Questioner Agent Information:**
- **Name**: {questioner_info['name']}
- **Role**: {questioner_info['role']}
- **Description**: {questioner_info['description']}
- **System Prompt Preview**: {questioner_info['system_prompt_preview']}
Please provide a comprehensive answer that demonstrates your expertise and addresses their question thoroughly.
Consider their background and role when formulating your response.
"""
        return showcase_prompt

    def random_select_agent(self, agents: List[Agent]) -> Agent:
        """Randomly select an agent from the list."""
        if not agents:
            raise ValueError("No agents available for selection")
        return random.choice(agents)

    def get_current_speaker(self) -> Agent:
        """Get the current speaker agent (either from speaker_agents list or single speaker_agent)."""
        if self.speaker_agent:
            return self.speaker_agent
        elif self.speaker_agents:
            return self.random_select_agent(self.speaker_agents)
        else:
            raise ValueError("No speaker agent available")

    def run(
        self, task: str, img: Optional[str] = None, *args, **kwargs
    ):
        """Run the Q&A session with agent showcasing.

        Args:
            task: The topic the questioner should ask about.
            img: Optional image forwarded to both agents' run() calls.

        Returns:
            The speaker agent's answer text.
        """
        self.conversation.add(role="user", content=task)

        # Get current speaker
        current_speaker = self.get_current_speaker()

        # Select a random questioner
        # NOTE(review): this raises if only speaker agents were configured,
        # even though _validate_setup allows that configuration — confirm
        # intended behavior with callers.
        questioner = self.random_select_agent(self.agents)

        # Showcase agents to each other if enabled
        if self.showcase_agents:
            # Showcase speaker to questioner
            speaker_showcase = self.showcase_speaker_to_questioner(
                questioner, current_speaker
            )
            questioner_task = f"{speaker_showcase}\n\nNow ask a question about: {task}"

            # Showcase questioner to speaker
            questioner_showcase = self.showcase_questioner_to_speaker(
                current_speaker, questioner
            )
        else:
            questioner_task = f"Ask a question about {task} to {current_speaker.agent_name}"

        # Generate question
        question = questioner.run(
            task=questioner_task,
            img=img,
            *args,
            **kwargs,
        )
        self.conversation.add(
            role=questioner.agent_name, content=question
        )

        # Prepare answer task with showcasing if enabled
        if self.showcase_agents:
            answer_task = f"{questioner_showcase}\n\nAnswer this question from {questioner.agent_name}: {question}"
        else:
            answer_task = f"Answer the question '{question}' from {questioner.agent_name}"

        # Generate answer
        answer = current_speaker.run(
            task=answer_task,
            img=img,
            *args,
            **kwargs,
        )
        self.conversation.add(
            role=current_speaker.agent_name, content=answer
        )

        return answer

    def run_multi_round(
        self,
        task: str,
        rounds: int = 3,
        img: Optional[str] = None,
        *args,
        **kwargs,
    ):
        """Run multiple rounds of Q&A with different questioners.

        Returns:
            A list of {"round": n, "result": answer} dicts, one per round.
        """
        results = []

        for round_num in range(rounds):
            logger.info(
                f"Starting Q&A round {round_num + 1}/{rounds}"
            )

            round_result = self.run(task, img, *args, **kwargs)
            results.append(
                {"round": round_num + 1, "result": round_result}
            )

        return results

    def get_conversation_history(self):
        """Get the conversation history."""
        return self.conversation.get_history()

    def clear_conversation(self):
        """Clear the conversation history."""
        self.conversation = Conversation()
        logger.info("Conversation history cleared")

@ -1,191 +0,0 @@
from pydantic.v1 import BaseModel
from typing import List, Callable
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="swarm_registry")
class SwarmRegistry(BaseModel):
    """A registry that stores swarm callables and can query, remove, and
    run them by name."""

    # Pydantic v1 copies this default per instance, so instances do not
    # share one pool.
    swarm_pool: List[Callable] = []

    def add(self, swarm: Callable, *args, **kwargs):
        """
        Adds a swarm to the registry.

        Args:
            swarm (Callable): The swarm to add to the registry.
        """
        # BUG FIX: list.append() takes exactly one argument; the previous
        # call `self.swarm_pool.append(swarm, *args, **kwargs)` raised
        # TypeError whenever extra arguments were supplied. Extra args are
        # now accepted-and-ignored to keep the signature backward compatible.
        self.swarm_pool.append(swarm)

    def _describe(self, swarm: Callable) -> str:
        """Build the one-line-per-field log description for a swarm."""
        name = swarm.__name__
        # Guard against swarms without a docstring (previously crashed with
        # AttributeError on `None.strip()`), and fall back gracefully when
        # no `description` attribute exists either.
        description = (swarm.__doc__ or "").strip().split("\n")[
            0
        ] or getattr(swarm, "description", "")
        # assumes the swarm exposes `agents` and `tasks` sequences —
        # TODO confirm against the swarm types stored here
        agent_count = len(swarm.agents)
        task_count = len(swarm.tasks)
        return f"Swarm: {name}\nDescription: {description}\nAgents: {agent_count}\nTasks: {task_count}"

    def query(self, swarm_name: str) -> Callable:
        """
        Queries the registry for a swarm by name.

        Args:
            swarm_name (str): The name of the swarm to query.

        Returns:
            Callable: The swarm function corresponding to the given name.

        Raises:
            ValueError: If the registry is empty, no name is given, or the
                name is not found.
        """
        if not self.swarm_pool:
            raise ValueError("No swarms found in registry")

        if not swarm_name:
            raise ValueError("No swarm name provided.")

        for swarm in self.swarm_pool:
            if swarm.__name__ == swarm_name:
                logger.info(self._describe(swarm))
                return swarm

        raise ValueError(
            f"Swarm '{swarm_name}' not found in registry."
        )

    def remove(self, swarm_name: str):
        """
        Removes a swarm from the registry by name.

        Args:
            swarm_name (str): The name of the swarm to remove.

        Raises:
            ValueError: If the name is not found in the registry.
        """
        for swarm in self.swarm_pool:
            if swarm.__name__ == swarm_name:
                self.swarm_pool.remove(swarm)
                return
        raise ValueError(
            f"Swarm '{swarm_name}' not found in registry."
        )

    def list_swarms(self) -> List[str]:
        """
        Lists the names of all swarms in the registry.

        Returns:
            List[str]: A list of swarm names.

        Raises:
            ValueError: If the registry is empty.
        """
        if not self.swarm_pool:
            raise ValueError("No swarms found in registry.")

        # Log a description of every swarm before returning the names.
        for swarm in self.swarm_pool:
            logger.info(self._describe(swarm))

        return [swarm.__name__ for swarm in self.swarm_pool]

    def run(self, swarm_name: str, *args, **kwargs):
        """
        Runs a swarm by name with the given arguments.

        Args:
            swarm_name (str): The name of the swarm to run.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            Any: The result of running the swarm.
        """
        swarm = self.query(swarm_name)
        return swarm(*args, **kwargs)

    def add_list_of_swarms(self, swarms: List[Callable]):
        """
        Adds a list of swarms to the registry.

        Args:
            swarms (List[Callable]): A list of swarms to add to the registry.
        """
        for swarm in swarms:
            self.add(swarm)

        return self.swarm_pool

    def query_multiple_of_swarms(
        self, swarm_names: List[str]
    ) -> List[Callable]:
        """
        Queries the registry for multiple swarms by name.

        Args:
            swarm_names (List[str]): A list of swarm names to query.

        Returns:
            List[Callable]: A list of swarm functions corresponding to the given names.
        """
        return [self.query(swarm_name) for swarm_name in swarm_names]

    def remove_list_of_swarms(self, swarm_names: List[str]):
        """
        Removes a list of swarms from the registry by name.

        Args:
            swarm_names (List[str]): A list of swarm names to remove.
        """
        for swarm_name in swarm_names:
            self.remove(swarm_name)
        return self.swarm_pool

    def run_multiple_of_swarms(
        self, swarm_names: List[str], *args, **kwargs
    ):
        """
        Runs a list of swarms by name with the given arguments.

        Args:
            swarm_names (List[str]): A list of swarm names to run.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            List[Any]: A list of results of running the swarms.
        """
        return [
            self.run(swarm_name, *args, **kwargs)
            for swarm_name in swarm_names
        ]
# Decorator to add a function to the registry
def swarm_registry():
    """
    Decorator factory that registers the decorated function in a shared
    SwarmRegistry.

    Returns:
        Callable: The decorator.
    """

    def decorator(func, *args, **kwargs):
        try:
            # BUG FIX: previously a brand-new SwarmRegistry was created for
            # every decorated function and immediately discarded, so the
            # registry never accumulated anything. Keep one shared registry
            # as an attribute on the factory function itself.
            registry = getattr(swarm_registry, "_registry", None)
            if registry is None:
                registry = SwarmRegistry()
                swarm_registry._registry = registry
            registry.add(func, *args, **kwargs)
            logger.info(
                f"Added swarm '{func.__name__}' to the registry."
            )
            return func
        except Exception as e:
            logger.error(str(e))
            raise

    return decorator

@ -546,8 +546,6 @@ class SwarmRouter:
description=self.description,
agents=self.agents,
max_loops=self.max_loops,
auto_save=self.autosave,
return_str_on=self.return_entire_history,
output_type=self.output_type,
*args,
**kwargs,

@ -1,8 +1,4 @@
from swarms.tools.base_tool import BaseTool
from swarms.tools.cohere_func_call_schema import (
CohereFuncSchema,
ParameterDefinition,
)
from swarms.tools.json_utils import base_model_to_json
from swarms.tools.mcp_client_tools import (
_create_server_tool_mapping,
@ -56,8 +52,6 @@ __all__ = [
"ToolFunction",
"tool",
"BaseTool",
"CohereFuncSchema",
"ParameterDefinition",
"ToolStorage",
"tool_registry",
"base_model_to_json",

@ -1,18 +0,0 @@
from pydantic import BaseModel, Field
from typing import Dict
class ParameterDefinition(BaseModel):
description: str = Field(
..., title="Description of the parameter"
)
type: str = Field(..., title="Type of the parameter")
required: bool = Field(..., title="Is the parameter required?")
class CohereFuncSchema(BaseModel):
name: str = Field(..., title="Name of the tool")
description: str = Field(..., title="Description of the tool")
parameter_definitions: Dict[str, ParameterDefinition] = Field(
..., title="Parameter definitions for the tool"
)

@ -1,9 +1,9 @@
import json
from typing import List, Any, Callable
import re
from typing import Any, Callable, List
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.parse_code import extract_code_from_markdown
logger = initialize_logger(log_folder="tool_parse_exec")

@ -1,88 +0,0 @@
# Best LLM Models by Task Type
# Simplified dictionary structure with model names and categories
best_models = {
    "Vision": [
        {"model": "gemini/gemini-2.5-pro", "category": "Vision"},
    ],
    "text-generation": [
        {
            "model": "claude-sonnet-4-20250514",
            "category": "text-generation",
        },
        {"model": "gpt-5-chat", "category": "text-generation"},
    ],
}


# Function to get all models for a task type
def get_models_by_task(task_type: str) -> list:
    """
    Get all models for a specific task type.

    Args:
        task_type (str): The task category (e.g., 'WebDev', 'Vision', 'text-generation')

    Returns:
        list: List of all models for the task type

    Raises:
        ValueError: If the task type is not a known category.
    """
    # Guard clause: unknown categories raise with the list of valid ones.
    if task_type in best_models:
        return best_models[task_type]
    raise ValueError(
        f"Task type '{task_type}' not found. Available types: {list(best_models.keys())}"
    )
# Function to get the first model for a task type (simplified from get_top_model)
def get_first_model(task_type: str) -> dict:
    """
    Get the first model for a specific task type.

    Args:
        task_type (str): The task category (e.g., 'WebDev', 'Vision', 'text-generation')

    Returns:
        dict: First model information with model name and category

    Raises:
        ValueError: If the task type is unknown or has no models.
    """
    if task_type not in best_models:
        raise ValueError(
            f"Task type '{task_type}' not found. Available types: {list(best_models.keys())}"
        )

    candidates = best_models[task_type]
    # An empty category is treated as an error rather than returning None.
    if not candidates:
        raise ValueError(
            f"No models found for task type '{task_type}'"
        )

    return candidates[0]
# Function to search for a specific model across all categories
def find_model_by_name(model_name: str) -> dict:
    """
    Find a model by name across all task categories.

    Args:
        model_name (str): The model name to search for (case-insensitive)

    Returns:
        dict: Model information if found, None otherwise
    """
    # Hoist the lowercasing out of the loop, and iterate values() directly:
    # the previous loop bound an unused `task_type` key on every iteration.
    target = model_name.lower()
    for models in best_models.values():
        for model in models:
            if model["model"].lower() == target:
                return model
    return None
# Function to get all available task types
def get_available_task_types() -> list:
    """
    Get all available task types/categories.

    Returns:
        list: List of all task type names
    """
    # Iterating a dict yields its keys, so list() gives the category names.
    return list(best_models)

@ -1,5 +0,0 @@
def litellm_check_for_tools(model_name: str):
    """Return whether the given model supports function calling (tools)."""
    # Imported lazily so litellm is only required when this check runs.
    from litellm.utils import supports_function_calling

    tool_support = supports_function_calling(model_name)
    return tool_support
Loading…
Cancel
Save