examples -- concurrent examples and api rate limits examples

pull/962/merge
Kye Gomez 6 days ago
parent 4a86e28893
commit c6443ffdf2

@@ -0,0 +1,40 @@
import json
import os

from swarms_client import SwarmsClient
from swarms_client.types import AgentSpecParam
from dotenv import load_dotenv

load_dotenv()

client = SwarmsClient(api_key=os.getenv("SWARMS_API_KEY"))

# Define the specification for a virtual doctor agent.
agent_spec = AgentSpecParam(
    agent_name="doctor_agent",
    description="A virtual doctor agent that provides evidence-based, safe, and empathetic medical advice for common health questions. Always reminds users to consult a healthcare professional for diagnoses or prescriptions.",
    task="What is the best medicine for a cold?",
    model_name="claude-3-5-sonnet-20241022",
    system_prompt=(
        "You are a highly knowledgeable, ethical, and empathetic virtual doctor. "
        "Always provide evidence-based, safe, and practical medical advice. "
        "If a question requires a diagnosis, prescription, or urgent care, remind the user to consult a licensed healthcare professional. "
        "Be clear, concise, and avoid unnecessary medical jargon. "
        "Never provide information that could be unsafe or misleading. "
        "If unsure, say so and recommend seeing a real doctor."
    ),
    max_loops=1,
    temperature=0.4,
    role="doctor",
)

# Run the agent (left commented out in this example).
# response = client.agent.run(
#     agent_config=agent_spec,
#     task="What is the best medicine for a cold?",
# )
# print(response)

# Inspect available models, service health, logs, rate limits, and available swarms.
print(json.dumps(client.models.list_available(), indent=4))
print(json.dumps(client.health.check(), indent=4))
print(json.dumps(client.client.get_logs(), indent=4))
print(json.dumps(client.client.rate.get_limits(), indent=4))
print(json.dumps(client.swarms.check_available(), indent=4))

@@ -0,0 +1,12 @@
from swarms_client import SwarmsClient
from dotenv import load_dotenv
import os

load_dotenv()

client = SwarmsClient(api_key=os.getenv("SWARMS_API_KEY"))

# Fetch the current API rate limits for this key, then check service health.
response = client.client.rate.get_limits()
print(response)
print(client.health.check())
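
Taken together, the two example files above suggest a simple pattern: check service health and rate limits first, then issue the agent run that the first example leaves commented out. A minimal sketch, assuming `client` and `agent_spec` are constructed as in the first example and that `client.agent.run(...)` accepts the same arguments shown there (the exact response shape is not documented in this diff):

import json

# Assumes `client` and `agent_spec` are built as in the examples above.
print(json.dumps(client.health.check(), indent=4))            # service health
print(json.dumps(client.client.rate.get_limits(), indent=4))  # current rate limits

# Same call as the commented-out block in the first example.
response = client.agent.run(
    agent_config=agent_spec,
    task="What is the best medicine for a cold?",
)
print(response)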

@@ -7,7 +7,6 @@ for more targeted and customizable evaluation of agent outputs.
"""
from swarms.agents.agent_judge import AgentJudge
-import os
from dotenv import load_dotenv
load_dotenv()

@@ -21,8 +20,8 @@ judge = AgentJudge(
    evaluation_criteria={
        "correctness": 0.5,
        "problem_solving_approach": 0.3,
-        "explanation_clarity": 0.2
-    }
+        "explanation_clarity": 0.2,
+    },
)
# Sample output to evaluate

@@ -39,7 +38,9 @@ evaluation = judge.run(task_response)
print(evaluation[0])
# Example 2: Specialized criteria for code evaluation
-print("\n=== Example 2: Code Evaluation with Specialized Criteria ===\n")
+print(
+    "\n=== Example 2: Code Evaluation with Specialized Criteria ===\n"
+)
code_judge = AgentJudge(
    model_name="claude-3-7-sonnet-20250219",

@@ -47,8 +48,8 @@ code_judge = AgentJudge(
    evaluation_criteria={
        "code_correctness": 0.4,
        "code_efficiency": 0.3,
-        "code_readability": 0.3
-    }
+        "code_readability": 0.3,
+    },
)
# Sample code to evaluate

@@ -76,25 +77,23 @@ print("\n=== Example 3: Comparing Multiple Agent Responses ===\n")
comparison_judge = AgentJudge(
    model_name="claude-3-7-sonnet-20250219",
-    evaluation_criteria={
-        "accuracy": 0.6,
-        "completeness": 0.4
-    }
+    evaluation_criteria={"accuracy": 0.6, "completeness": 0.4},
)
-multiple_responses = comparison_judge.run([
+multiple_responses = comparison_judge.run(
+    [
        "Task: Explain the CAP theorem in distributed systems.\n\n"
        "Agent A response: CAP theorem states that a distributed system cannot simultaneously "
        "provide Consistency, Availability, and Partition tolerance. In practice, you must "
        "choose two out of these three properties.",
        "Task: Explain the CAP theorem in distributed systems.\n\n"
        "Agent B response: The CAP theorem, formulated by Eric Brewer, states that in a "
        "distributed data store, you can only guarantee two of the following three properties: "
        "Consistency (all nodes see the same data at the same time), Availability (every request "
        "receives a response), and Partition tolerance (the system continues to operate despite "
        "network failures). Most modern distributed systems choose to sacrifice consistency in "
-        "favor of availability and partition tolerance, implementing eventual consistency models instead."
-])
+        "favor of availability and partition tolerance, implementing eventual consistency models instead.",
+    ]
+)
print(multiple_responses[0])

@@ -25,7 +25,6 @@ from swarms.utils.concurrent_wrapper import (
@concurrent(
    name="data_processor",
    description="Process data concurrently",
-    max_workers=4,
    timeout=30,
    retry_on_failure=True,
    max_retries=2,

@@ -17,20 +17,23 @@ class AgentJudgeInitializationError(Exception):
    pass


class AgentJudgeExecutionError(Exception):
    """
    Exception raised when there is an error executing the AgentJudge.
    """

    pass


class AgentJudgeFeedbackCycleError(Exception):
    """
    Exception raised when there is an error in the feedback cycle.
    """

    pass


class AgentJudge:
    """
    A specialized agent designed to evaluate and judge outputs from other agents or systems.

@@ -99,9 +102,7 @@ class AgentJudge:
        model_name: str = "openai/o1",
        max_loops: int = 1,
        verbose: bool = False,
        evaluation_criteria: Optional[Dict[str, float]] = None,
        *args,
        **kwargs,
    ):

@@ -123,13 +124,10 @@ class AgentJudge:
                criteria_str += f"- {criterion}: weight = {weight}\n"
            enhanced_prompt += criteria_str

        self.agent = Agent(
            agent_name=agent_name,
            agent_description=description,
            system_prompt=enhanced_prompt,
            model_name=model_name,
            max_loops=1,
            *args,
@@ -250,7 +248,9 @@ class AgentJudge:
        # Add the evaluation criteria to the task description
        task_instruction = "You are an expert AI agent judge. Carefully review the following output(s) generated by another agent. "
        task_instruction += "Your job is to provide a detailed, constructive, and actionable critique that will help the agent improve its future performance. "
-        task_instruction += "Your feedback should address the following points:\n"
+        task_instruction += (
+            "Your feedback should address the following points:\n"
+        )
        task_instruction += "1. Strengths: What did the agent do well? Highlight any correct reasoning, clarity, or effective problem-solving.\n"
        task_instruction += "2. Weaknesses: Identify any errors, omissions, unclear reasoning, or areas where the output could be improved.\n"
        task_instruction += "3. Suggestions: Offer specific, practical recommendations for how the agent can improve its next attempt. "

@@ -261,8 +261,13 @@ class AgentJudge:
        if self.evaluation_criteria:
            criteria_names = list(self.evaluation_criteria.keys())
            task_instruction += "\nPlease use these specific evaluation criteria with their respective weights:\n"
-            for criterion, weight in self.evaluation_criteria.items():
-                task_instruction += f"- {criterion}: weight = {weight}\n"
+            for (
+                criterion,
+                weight,
+            ) in self.evaluation_criteria.items():
+                task_instruction += (
+                    f"- {criterion}: weight = {weight}\n"
+                )
        task_instruction += "Be thorough, objective, and professional. Your goal is to help the agent learn and produce better results in the future.\n\n"
        task_instruction += f"Output(s) to evaluate:\n{prompt}\n"

@@ -437,4 +442,3 @@ class AgentJudge:
            responses.append(response)
        return responses
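
As a quick illustration of what the criteria handling above produces (not part of the diff): with the weights used in the comparison example earlier in this PR, the loop renders one bulleted line per criterion into the instruction text.

# Illustrative only: mirrors the criteria loop in AgentJudge above.
evaluation_criteria = {"accuracy": 0.6, "completeness": 0.4}

criteria_str = ""
for criterion, weight in evaluation_criteria.items():
    criteria_str += f"- {criterion}: weight = {weight}\n"

print(criteria_str)
# - accuracy: weight = 0.6
# - completeness: weight = 0.4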

@@ -2,9 +2,9 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Callable, List, Optional, Union
from swarms.structs.agent import Agent
-from swarms.utils.output_types import OutputType
from swarms.structs.rearrange import AgentRearrange
from swarms.utils.loguru_logger import initialize_logger
+from swarms.utils.output_types import OutputType

logger = initialize_logger(log_folder="sequential_workflow")

@@ -29,6 +29,7 @@ class SequentialWorkflow:
    def __init__(
        self,
+        id: str = "sequential_workflow",
        name: str = "SequentialWorkflow",
        description: str = "Sequential Workflow, where agents are executed in a sequence.",
        agents: List[Union[Agent, Callable]] = [],

@@ -38,6 +39,7 @@ class SequentialWorkflow:
        *args,
        **kwargs,
    ):
+        self.id = id
        self.name = name
        self.description = description
        self.agents = agents
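
The only behavioral change in this file is the new `id` field, so constructing a workflow stays the same apart from one extra keyword. A minimal sketch, assuming the class is importable from `swarms.structs.sequential_workflow` (the module path is inferred from the logger folder above) and using only Agent parameters that appear elsewhere in this diff; the agent names and pipeline are hypothetical:

from swarms.structs.agent import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow  # import path assumed

# Hypothetical two-step pipeline; agent construction details are elided.
writer = Agent(agent_name="writer", model_name="claude-3-5-sonnet-20241022", max_loops=1)
editor = Agent(agent_name="editor", model_name="claude-3-5-sonnet-20241022", max_loops=1)

workflow = SequentialWorkflow(
    id="blog_pipeline",  # new in this change; defaults to "sequential_workflow"
    name="SequentialWorkflow",
    description="Sequential Workflow, where agents are executed in a sequence.",
    agents=[writer, editor],
)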

@@ -1,46 +1,45 @@
-from swarms.tools.tool_utils import (
-    scrape_tool_func_docs,
-    tool_find_by_name,
-)
-from swarms.tools.pydantic_to_json import (
-    _remove_a_key,
-    base_model_to_openai_function,
-    multi_base_model_to_openai_function,
-)
-from swarms.tools.openai_func_calling_schema_pydantic import (
-    OpenAIFunctionCallSchema as OpenAIFunctionCallSchemaBaseModel,
-)
-from swarms.tools.py_func_to_openai_func_str import (
-    get_openai_function_schema_from_func,
-    load_basemodels_if_needed,
-    get_load_param_if_needed_function,
-    get_parameters,
-    get_required_params,
-    Function,
-    ToolFunction,
-)
-from swarms.tools.openai_tool_creator_decorator import tool
-from swarms.tools.base_tool import BaseTool
-from swarms.tools.cohere_func_call_schema import (
-    CohereFuncSchema,
-    ParameterDefinition,
-)
-from swarms.tools.tool_registry import ToolStorage, tool_registry
-from swarms.tools.json_utils import base_model_to_json
-from swarms.tools.mcp_client_call import (
-    execute_tool_call_simple,
-    _execute_tool_call_simple,
-    get_tools_for_multiple_mcp_servers,
-    get_mcp_tools_sync,
-    aget_mcp_tools,
-    execute_multiple_tools_on_multiple_mcp_servers,
-    execute_multiple_tools_on_multiple_mcp_servers_sync,
-    _create_server_tool_mapping,
-    _create_server_tool_mapping_async,
-    _execute_tool_on_server,
-)
+from swarms.tools.base_tool import BaseTool
+from swarms.tools.cohere_func_call_schema import (
+    CohereFuncSchema,
+    ParameterDefinition,
+)
+from swarms.tools.json_utils import base_model_to_json
+from swarms.tools.mcp_client_call import (
+    _create_server_tool_mapping,
+    _create_server_tool_mapping_async,
+    _execute_tool_call_simple,
+    _execute_tool_on_server,
+    aget_mcp_tools,
+    execute_multiple_tools_on_multiple_mcp_servers,
+    execute_multiple_tools_on_multiple_mcp_servers_sync,
+    execute_tool_call_simple,
+    get_mcp_tools_sync,
+    get_tools_for_multiple_mcp_servers,
+)
+from swarms.tools.openai_func_calling_schema_pydantic import (
+    OpenAIFunctionCallSchema as OpenAIFunctionCallSchemaBaseModel,
+)
+from swarms.tools.openai_tool_creator_decorator import tool
+from swarms.tools.py_func_to_openai_func_str import (
+    Function,
+    ToolFunction,
+    get_load_param_if_needed_function,
+    get_openai_function_schema_from_func,
+    get_parameters,
+    get_required_params,
+    load_basemodels_if_needed,
+)
+from swarms.tools.pydantic_to_json import (
+    _remove_a_key,
+    base_model_to_openai_function,
+    multi_base_model_to_openai_function,
+)
+from swarms.tools.tool_registry import ToolStorage, tool_registry
+from swarms.tools.tool_utils import (
+    scrape_tool_func_docs,
+    tool_find_by_name,
+)

__all__ = [
    "scrape_tool_func_docs",
    "tool_find_by_name",
__all__ = [ __all__ = [
"scrape_tool_func_docs", "scrape_tool_func_docs",
"tool_find_by_name", "tool_find_by_name",

@@ -1,3 +1,4 @@
+import os
import asyncio
import concurrent.futures
import inspect

@@ -156,6 +157,9 @@ def concurrent(
        Callable: Decorated function that can execute concurrently
    """

+    if max_workers is None:
+        max_workers = os.cpu_count()
+
    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        config = ConcurrentConfig(
            name=name or func.__name__,
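
With this change, omitting max_workers falls back to os.cpu_count(). A minimal usage sketch based only on the decorator parameters visible in this diff; the decorated function's body is hypothetical, and how its results are collected is not shown here:

from swarms.utils.concurrent_wrapper import concurrent

@concurrent(
    name="data_processor",
    description="Process data concurrently",
    # max_workers omitted: now defaults to os.cpu_count()
    timeout=30,
    retry_on_failure=True,
    max_retries=2,
)
def process_item(item):
    # Hypothetical per-item work; the real example file defines its own body.
    return item * 2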
