@@ -14,6 +14,7 @@ from swarms.utils.litellm_wrapper import LiteLLM
from litellm import models_by_provider
from dotenv import set_key, find_dotenv
import logging  # Import the logging module
import litellm  # Import litellm exception

# Initialize logger
load_dotenv()
@@ -21,7 +22,6 @@ load_dotenv()
# Initialize logger
logger = initialize_logger(log_folder="swarm_ui")

# Define the path to agent_prompts.json
PROMPT_JSON_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "agent_prompts.json"
@@ -86,9 +86,9 @@ def load_prompts_from_json() -> Dict[str, str]:
        "Agent-Onboarding_Agent": "You are an onboarding agent...",
    }

AGENT_PROMPTS = load_prompts_from_json()

api_keys = {}


def initialize_agents(
    dynamic_temp: float,
@@ -118,6 +118,24 @@ def initialize_agents(
                counter += 1
            seen_names.add(agent_name)

            # Set API key using os.environ temporarily
            if provider == "openai":
                os.environ["OPENAI_API_KEY"] = api_key
            elif provider == "anthropic":
                os.environ["ANTHROPIC_API_KEY"] = api_key
            elif provider == "cohere":
                os.environ["COHERE_API_KEY"] = api_key
            elif provider == "gemini":
                os.environ["GEMINI_API_KEY"] = api_key
            elif provider == "mistral":
                os.environ["MISTRAL_API_KEY"] = api_key
            elif provider == "groq":
                os.environ["GROQ_API_KEY"] = api_key
            elif provider == "perplexity":
                os.environ["PERPLEXITY_API_KEY"] = api_key
            # Add other providers and their environment variable names as needed

            # Create LiteLLM instance (Now it will read from os.environ)
            llm = LiteLLM(
                model_name=model_name,
                system_prompt=agent_prompt,
@@ -140,10 +158,9 @@ def initialize_agents(
                output_type="string",  # here is the output type which is string
                temperature=dynamic_temp,
            )
            print(
                f"Agent created: {agent.agent_name}"
            )  # Debug: Print agent name
            print(f"Agent created: {agent.agent_name}")
            agents.append(agent)

        logger.info(f"Agents initialized successfully: {[agent.agent_name for agent in agents]}")
        return agents
    except Exception as e:
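The if/elif chain above only maps the selected provider to the environment variable that LiteLLM reads before the client is constructed. A minimal standalone sketch of that mapping as a lookup table (the PROVIDER_ENV_VARS name and export_provider_key helper are illustrative, not part of this patch):

import os

# Environment variables LiteLLM reads for each supported provider
# (the same names the hunk above sets).
PROVIDER_ENV_VARS = {
    "openai": "OPENAI_API_KEY",
    "anthropic": "ANTHROPIC_API_KEY",
    "cohere": "COHERE_API_KEY",
    "gemini": "GEMINI_API_KEY",
    "mistral": "MISTRAL_API_KEY",
    "groq": "GROQ_API_KEY",
    "perplexity": "PERPLEXITY_API_KEY",
}

def export_provider_key(provider: str, api_key: str) -> None:
    # Illustrative helper: put the key where the LLM client will look for it.
    env_var = PROVIDER_ENV_VARS.get(provider)
    if env_var is None:
        raise ValueError(f"Unsupported provider: {provider}")
    os.environ[env_var] = api_key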
@@ -245,7 +262,6 @@ async def execute_task(
            yield "Flow configuration is required for AgentRearrange", gr.update(visible=True), ""
            return

        # Generate unique agent names in the flow
        flow_agents = []
        used_agent_names = set()
@@ -265,7 +281,6 @@ async def execute_task(
        router_kwargs["flow"] = flow
        router_kwargs["output_type"] = "string"  # Changed output type here

    if swarm_type == "MixtureOfAgents":
        if len(agents) < 2:
            logger.error("MixtureOfAgents requires at least 2 agents.")
@@ -403,6 +418,14 @@ async def execute_task(
        logger.error(f"Task execution timed out after {timeout} seconds", exc_info=True)
        yield f"Task execution timed out after {timeout} seconds", gr.update(visible=True), ""
        return
    except litellm.exceptions.APIError as e:  # Catch litellm APIError
        logger.error(f"LiteLLM API Error: {e}", exc_info=True)
        yield f"LiteLLM API Error: {e}", gr.update(visible=True), ""
        return
    except litellm.exceptions.AuthenticationError as e:  # Catch litellm AuthenticationError
        logger.error(f"LiteLLM Authentication Error: {e}", exc_info=True)
        yield f"LiteLLM Authentication Error: {e}", gr.update(visible=True), ""
        return
    except Exception as e:
        logger.error(f"Error executing task: {e}", exc_info=True)
        yield f"Error executing task: {e}", gr.update(visible=True), ""
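The added except clauses distinguish LiteLLM's own error types from a generic failure so the UI can show a more specific message. A minimal sketch of the same handling around a direct completion call (the model name and message are placeholders; the attribute access assumes litellm's OpenAI-style response object):

import litellm

def safe_completion(prompt: str) -> str:
    try:
        response = litellm.completion(
            model="gpt-4o-mini",  # placeholder model name
            messages=[{"role": "user", "content": prompt}],
        )
        # OpenAI-style response shape returned by litellm.
        return response.choices[0].message.content
    except litellm.exceptions.AuthenticationError as e:
        # Missing or invalid API key for the selected provider.
        return f"LiteLLM Authentication Error: {e}"
    except litellm.exceptions.APIError as e:
        # Provider-side API failure.
        return f"LiteLLM API Error: {e}"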
@@ -419,7 +442,6 @@ async def execute_task(
    finally:
        logger.info(f"Task execution finished for: {task} with swarm type: {swarm_type}")


def format_output(data:Optional[str], swarm_type:str, error_display=None) -> str:
    if data is None:
        return "Error: No output from the swarm."
@@ -433,7 +455,7 @@ def format_output(data:Optional[str], swarm_type:str, error_display=None) -> str
        if os.path.exists(data):
            return parse_spreadsheet_swarm_output(data, error_display)
        else:
            return parse_json_output(data, error_display)
        return data  # Directly return JSON response
    elif swarm_type == "auto":
        return parse_auto_swarm_output(data, error_display)
    else:
@@ -552,14 +574,14 @@ def parse_auto_swarm_output(data: Optional[str], error_display=None) -> str:
        output += f"Agent Flow: `{agent_flow}`\n\n---\n"
        output += f"Agent Task Execution\n\n"

        # Handle nested MixtureOfAgents data
        # Handle nested MixtureOfAgents data or other swarm type data
        if (
            "outputs" in parsed_data
            and isinstance(parsed_data["outputs"], list)
            and parsed_data["outputs"]
            and isinstance(parsed_data["outputs"][0], dict)
            and parsed_data["outputs"][0].get("agent_name") == "auto"
        ):
        if parsed_data["outputs"][0].get("agent_name") == "auto":
            mixture_data = parsed_data["outputs"][0].get("steps", [])
            if mixture_data and isinstance(mixture_data[0], dict) and "content" in mixture_data[0]:
                try:
@@ -611,6 +633,9 @@ def parse_auto_swarm_output(data: Optional[str], error_display=None) -> str:
                    content = step["content"]
                    output += f"Step {(3-j)}:\n"
                    output += f"Response: {content}\n\n"
        else:
            logger.error("Error: 'outputs' data is not in the expected format.")
            return "Error: 'outputs' data is not in the expected format."

        output += f"Overall Completion Time: `{overall_time}`"
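The guarded condition above is what keeps parse_auto_swarm_output from raising KeyError/IndexError when the router output does not have the nested outputs -> steps -> content shape. A minimal sketch of that defensive parse as a standalone helper (field names are taken from the checks above; the overall JSON layout is otherwise assumed):

import json
from typing import List

def extract_auto_swarm_steps(raw: str) -> List[str]:
    # Return the step contents when the payload matches the expected shape;
    # otherwise return an empty list instead of raising.
    try:
        parsed = json.loads(raw)
    except json.JSONDecodeError:
        return []
    if not isinstance(parsed, dict):
        return []
    outputs = parsed.get("outputs")
    if not (isinstance(outputs, list) and outputs and isinstance(outputs[0], dict)):
        return []
    if outputs[0].get("agent_name") != "auto":
        return []
    steps = outputs[0].get("steps", [])
    return [s["content"] for s in steps if isinstance(s, dict) and "content" in s]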
@@ -805,7 +830,6 @@ def parse_agent_rearrange_output(data: Optional[str], error_display=None) -> str
        logger.error(f"Error during parsing AgentRearrange output: {e}", exc_info=True)
        return f"Error during parsing: {str(e)}"


def parse_mixture_of_agents_output(data: Optional[str], error_display=None) -> str:
    """Parses the MixtureOfAgents output string and formats it for display."""
    logger.info("Parsing MixtureOfAgents output...")
@@ -922,7 +946,6 @@ def parse_mixture_of_agents_output(data: Optional[str], error_display=None) -> s
        logger.error(f"Error during parsing MixtureOfAgents output: {e}", exc_info=True)
        return f"Error during parsing: {str(e)}"


def parse_sequential_workflow_output(data: Optional[str], error_display=None) -> str:
    """Parses the SequentialWorkflow output string and formats it for display."""
    logger.info("Parsing SequentialWorkflow output...")
@@ -1259,6 +1282,9 @@ class UI:
                    - MixtureOfAgents: Combines multiple agents with an aggregator
                    - SpreadSheetSwarm: Specialized for spreadsheet operations
                    - Auto: Automatically determines optimal workflow

                    **Note:**
                    SpreadSheetSwarm saves its output as a CSV file and works only in a local setup!
                    """
                )
                return gr.Column()
@@ -1596,6 +1622,8 @@ def create_app():
            except Exception as e:
                return f"Error saving agent prompt: {str(e)}"

        # In the run_task_wrapper function, modify the API key handling
        async def run_task_wrapper(
            task,
            max_loops,
@@ -1614,7 +1642,6 @@ def create_app():
            # Update status
            yield "Processing...", "Running task...", "", gr.update(visible=False), gr.update(visible=False)

            # Prepare flow for AgentRearrange
            flow = None
            if swarm_type == "AgentRearrange":
@@ -1634,47 +1661,15 @@ def create_app():
                    f"Flow string: {flow}"
                )  # Debug: Print flow string

            # Save API key to .env
            env_path = find_dotenv()
            if not env_path:
                env_path = os.path.join(os.getcwd(), ".env")
                with open(env_path, "w") as f:
                    f.write("")
            if provider == "openai":
                set_key(env_path, "OPENAI_API_KEY", api_key)
            elif provider == "anthropic":
                set_key(
                    env_path, "ANTHROPIC_API_KEY", api_key
                )
            elif provider == "cohere":
                set_key(env_path, "COHERE_API_KEY", api_key)
            elif provider == "gemini":
                set_key(env_path, "GEMINI_API_KEY", api_key)
            elif provider == "mistral":
                set_key(env_path, "MISTRAL_API_KEY", api_key)
            elif provider == "groq":
                set_key(env_path, "GROQ_API_KEY", api_key)
            elif provider == "perplexity":
                set_key(
                    env_path, "PERPLEXITY_API_KEY", api_key
                )
            else:
                yield (
                    f"Error: provider {provider} is not supported",
                    f"Error: {provider} not supported",
                    "",
                    gr.update(visible=True),
                    gr.update(visible=False)
                )
                return

            # save api keys in memory
            api_keys[provider] = api_key

            agents = initialize_agents(
                dynamic_temp,
                agent_prompt_selector,
                model_name,
                provider,
                api_key,
                api_keys.get(provider),  # Access API key from the dictionary
                temperature,
                max_tokens,
            )
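With this change the key for the current session lives in the module-level api_keys dict and is read back with api_keys.get(provider) instead of being written to .env at this point. A minimal sketch of that lookup (the fallback to an environment variable is an assumption for illustration, not part of this patch):

import os
from typing import Dict, Optional

api_keys: Dict[str, str] = {}

def get_provider_key(provider: str) -> Optional[str]:
    # Prefer the key stored in memory for this session; fall back to the
    # environment variable (assumption: same naming used elsewhere in the file).
    return api_keys.get(provider) or os.environ.get(f"{provider.upper()}_API_KEY")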
@@ -1698,7 +1693,7 @@ def create_app():
                flow=flow,
                model_name=model_name,
                provider=provider,
                api_key=api_key,
                api_key=api_keys.get(provider),  # Pass the api key from memory
                temperature=temperature,
                max_tokens=max_tokens,
                agents=agents_dict,  # Changed here
@@ -1716,6 +1711,45 @@ def create_app():
                yield f"Error: {str(e)}", f"Error: {str(e)}", "", gr.update(visible=True), gr.update(visible=True)
                return

            # Save API key to .env
            env_path = find_dotenv()
            if not env_path:
                env_path = os.path.join(os.getcwd(), ".env")
                with open(env_path, "w") as f:
                    f.write("")
            if provider == "openai":
                set_key(env_path, "OPENAI_API_KEY", api_key)
            elif provider == "anthropic":
                set_key(
                    env_path, "ANTHROPIC_API_KEY", api_key
                )
            elif provider == "cohere":
                set_key(env_path, "COHERE_API_KEY", api_key)
            elif provider == "gemini":
                set_key(env_path, "GEMINI_API_KEY", api_key)
            elif provider == "mistral":
                set_key(env_path, "MISTRAL_API_KEY", api_key)
            elif provider == "groq":
                set_key(env_path, "GROQ_API_KEY", api_key)
            elif provider == "perplexity":
                set_key(
                    env_path, "PERPLEXITY_API_KEY", api_key
                )
            else:
                yield (
                    f"Error: provider {provider} is not supported",
                    f"Error: {provider} not supported",
                    "",
                    gr.update(visible=True),
                    gr.update(visible=False)
                )
                return
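The block above persists the key with python-dotenv so it survives a restart, creating an empty .env when find_dotenv() returns nothing. A minimal sketch of that flow for a single variable (the persist_api_key helper name is illustrative):

import os
from dotenv import find_dotenv, set_key

def persist_api_key(env_var: str, api_key: str) -> str:
    # Locate an existing .env; otherwise create an empty one in the working
    # directory, mirroring the fallback used in the patch above.
    env_path = find_dotenv()
    if not env_path:
        env_path = os.path.join(os.getcwd(), ".env")
        with open(env_path, "w") as f:
            f.write("")
    # set_key writes or updates the ENV_VAR=value line in the file.
    set_key(env_path, env_var, api_key)
    return env_path

# Usage sketch: persist_api_key("OPENAI_API_KEY", api_key)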
        # Connect the update functions
        agent_selector.change(
            fn=update_ui_for_swarm_type,