parent 80d66db4a5
commit bd187f98ac
@@ -0,0 +1,92 @@
import os

from swarm_models import OpenAIChat

from swarms import Agent, GroupChat

# Example usage:
api_key = os.getenv("OPENAI_API_KEY")

# Create individual agents with the OpenAIChat model
model1 = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
model2 = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
model3 = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

agent1 = Agent(
    agent_name="Agent1",
    llm=model1,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="agent1_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

agent2 = Agent(
    agent_name="Agent2",
    llm=model2,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="agent2_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

agent3 = Agent(
    agent_name="Agent3",
    llm=model3,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="agent3_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

aggregator_agent = Agent(
    agent_name="AggregatorAgent",
    llm=model1,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="aggregator_agent_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

# Create the GroupChat, using the aggregator as the selector agent
moa = GroupChat(
    agents=[agent1, agent2, agent3],
    max_rounds=1,
    group_objective="Establish a ROTH IRA",
    selector_agent=aggregator_agent,
)

out = moa.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
print(out)
@@ -0,0 +1,67 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from dotenv import load_dotenv

load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key,
    model_name="gpt-4o-mini",
    temperature=0.1,
    max_tokens=2000,
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
    # output_type="json",
    output_type=str,
)


out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)


def log_agent_data(data: dict):
    import requests

    data_dict = {
        "data": data,
    }

    url = "https://swarms.world/api/get-agents/log-agents"
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
    }

    response = requests.post(url, json=data_dict, headers=headers)

    return response.json()


out = log_agent_data(agent.to_dict())
print(out)
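A minimal sketch, not part of this diff, of the same logging call with the bearer token read from the environment instead of hardcoded inline as above (the SWARMS_API_KEY variable name is an assumption, not something this commit defines):

# Sketch: same endpoint as the function above, token pulled from the environment.
# Assumes SWARMS_API_KEY is set; everything else mirrors the committed code.
import os
import requests


def log_agent_data(data: dict) -> dict:
    url = "https://swarms.world/api/get-agents/log-agents"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.getenv('SWARMS_API_KEY')}",
    }
    response = requests.post(url, json={"data": data}, headers=headers)
    return response.json()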
@@ -0,0 +1,93 @@
import os

from swarm_models import OpenAIChat

from swarms import Agent, MixtureOfAgents

# Example usage:
api_key = os.getenv("OPENAI_API_KEY")

# Create individual agents with the OpenAIChat model
model1 = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
model2 = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
model3 = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

agent1 = Agent(
    agent_name="Agent1",
    llm=model1,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="agent1_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

agent2 = Agent(
    agent_name="Agent2",
    llm=model2,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="agent2_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

agent3 = Agent(
    agent_name="Agent3",
    llm=model3,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="agent3_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

aggregator_agent = Agent(
    agent_name="AggregatorAgent",
    llm=model1,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="aggregator_agent_state.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
)

# Create the Mixture of Agents class
moa = MixtureOfAgents(
    reference_agents=[agent1, agent2, agent3],
    aggregator_agent=aggregator_agent,
    aggregator_system_prompt="""You have been provided with a set of responses from various agents.
    Your task is to synthesize these responses into a single, high-quality response.""",
    layers=3,
)

out = moa.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
print(out)
@@ -0,0 +1,16 @@
def log_agent_data(data: dict):
    import requests

    data_dict = {
        "data": data,
    }

    url = "https://swarms.world/api/get-agents/log-agents"
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
    }

    response = requests.post(url, json=data_dict, headers=headers)

    return response.json()
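A brief usage sketch, not part of this diff, showing how this helper is called in the finance-agent script above (it assumes agent is an existing swarms Agent instance):

# Sketch: serialize an agent and send it to the logging endpoint.
# Assumes `agent` is an existing swarms Agent instance, as in the script above.
payload = agent.to_dict()
result = log_agent_data(payload)
print(result)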
@@ -0,0 +1,92 @@
import subprocess
import sys
from loguru import logger
from typing import Tuple, Union, List
from e2b_code_interpreter import CodeInterpreter
from dotenv import load_dotenv

load_dotenv()


# Helper function to lazily install the package if not found
def lazy_install(package: str) -> None:
    try:
        __import__(package)
    except ImportError:
        logger.warning(f"{package} not found. Installing now...")
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", package]
        )


# Ensure e2b_code_interpreter is installed lazily
lazy_install("e2b_code_interpreter")


def code_interpret(
    code_interpreter: CodeInterpreter, code: str
) -> Union[str, None]:
    """
    Runs AI-generated code using the provided CodeInterpreter and logs the process.

    Args:
        code_interpreter (CodeInterpreter): An instance of the CodeInterpreter class.
        code (str): The code string to be executed.

    Returns:
        Union[str, None]: A string combining the execution results and logs if successful,
        or None if an error occurred.

    Raises:
        ValueError: If the code or code_interpreter is invalid.
    """
    if not isinstance(code_interpreter, CodeInterpreter):
        logger.error("Invalid CodeInterpreter instance provided.")
        raise ValueError(
            "code_interpreter must be an instance of CodeInterpreter."
        )
    if not isinstance(code, str) or not code.strip():
        logger.error("Invalid code provided.")
        raise ValueError("code must be a non-empty string.")

    logger.info(
        f"\n{'='*50}\n> Running the following AI-generated code:\n{code}\n{'='*50}"
    )

    try:
        exec_result = code_interpreter.notebook.exec_cell(
            code,
            # on_stderr=lambda stderr: logger.error(f"[Code Interpreter stderr] {stderr}"),
            # on_stdout=lambda stdout: logger.info(f"[Code Interpreter stdout] {stdout}")
        )

        if exec_result.error:
            logger.error(
                f"[Code Interpreter error] {exec_result.error}"
            )
            return None
        else:
            logger.success("Code executed successfully.")
            # return exec_result.results, exec_result.logs
            # return exec_result.results
            prompt = f"{exec_result.results}: {exec_result.logs}"
            return prompt

    except Exception:
        logger.exception(
            "An error occurred during code interpretation."
        )
        return None


# # from e2b_code_interpreter import CodeInterpreter

# interpreter = CodeInterpreter()
# code = "print('Hello, World!')"

# result = code_interpret(interpreter, code)

# if result:
#     results = result
#     print("Execution Results:", results)
#     # print("Execution Logs:", logs)
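A short sketch, not part of this diff, that mirrors the commented-out example above (it assumes the e2b sandbox can actually start, i.e. E2B credentials are configured in the environment):

# Sketch: run a snippet through the sandbox and print the combined results/logs string.
# Assumes E2B credentials are available; names match the committed code above.
interpreter = CodeInterpreter()
output = code_interpret(interpreter, "print('Hello, World!')")
if output is not None:
    print("Execution output:", output)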
@ -1,97 +0,0 @@
|
|||||||
import json
|
|
||||||
import re
|
|
||||||
from typing import Type, TypeVar
|
|
||||||
|
|
||||||
from pydantic import BaseModel, ValidationError
|
|
||||||
|
|
||||||
T = TypeVar("T", bound=BaseModel)
|
|
||||||
|
|
||||||
|
|
||||||
class JsonParsingException(Exception):
|
|
||||||
"""Custom exception for errors in JSON parsing."""
|
|
||||||
|
|
||||||
|
|
||||||
class JsonOutputParser:
|
|
||||||
"""Parse JSON output using a Pydantic model.
|
|
||||||
|
|
||||||
This parser is designed to extract JSON formatted data from a given string
|
|
||||||
and parse it using a specified Pydantic model for validation.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
pydantic_object: A Pydantic model class for parsing and validation.
|
|
||||||
pattern: A regex pattern to match JSON code blocks.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
>>> from pydantic import BaseModel
|
|
||||||
>>> from swarms.utils.json_output_parser import JsonOutputParser
|
|
||||||
>>> class MyModel(BaseModel):
|
|
||||||
... name: str
|
|
||||||
... age: int
|
|
||||||
...
|
|
||||||
>>> parser = JsonOutputParser(MyModel)
|
|
||||||
>>> text = "```json\n{\"name\": \"John\", \"age\": 42}\n```"
|
|
||||||
>>> model = parser.parse(text)
|
|
||||||
>>> model.name
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, pydantic_object: Type[T]):
|
|
||||||
self.pydantic_object = pydantic_object
|
|
||||||
self.pattern = re.compile(
|
|
||||||
r"^```(?:json)?(?P<json>[^`]*)", re.MULTILINE | re.DOTALL
|
|
||||||
)
|
|
||||||
|
|
||||||
def parse(self, text: str) -> T:
|
|
||||||
"""Parse the provided text to extract and validate JSON data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
text: A string containing potential JSON data.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
An instance of the specified Pydantic model with parsed data.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
JsonParsingException: If parsing or validation fails.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
match = re.search(self.pattern, text.strip())
|
|
||||||
json_str = match.group("json") if match else text
|
|
||||||
|
|
||||||
json_object = json.loads(json_str)
|
|
||||||
return self.pydantic_object.parse_obj(json_object)
|
|
||||||
|
|
||||||
except (json.JSONDecodeError, ValidationError) as e:
|
|
||||||
name = self.pydantic_object.__name__
|
|
||||||
msg = (
|
|
||||||
f"Failed to parse {name} from text '{text}'."
|
|
||||||
f" Error: {e}"
|
|
||||||
)
|
|
||||||
raise JsonParsingException(msg) from e
|
|
||||||
|
|
||||||
def get_format_instructions(self) -> str:
|
|
||||||
"""Generate formatting instructions based on the Pydantic model schema.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A string containing formatting instructions.
|
|
||||||
"""
|
|
||||||
schema = self.pydantic_object.schema()
|
|
||||||
reduced_schema = {
|
|
||||||
k: v
|
|
||||||
for k, v in schema.items()
|
|
||||||
if k not in ["title", "type"]
|
|
||||||
}
|
|
||||||
schema_str = json.dumps(reduced_schema, indent=4)
|
|
||||||
|
|
||||||
format_instructions = (
|
|
||||||
f"JSON Formatting Instructions:\n{schema_str}"
|
|
||||||
)
|
|
||||||
return format_instructions
|
|
||||||
|
|
||||||
|
|
||||||
# # Example usage
|
|
||||||
# class ExampleModel(BaseModel):
|
|
||||||
# field1: int
|
|
||||||
# field2: str
|
|
||||||
|
|
||||||
# parser = JsonOutputParser(ExampleModel)
|
|
||||||
# # Use parser.parse(text) to parse JSON data
|
|
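For context on what this deletion removes, a small sketch, not part of this diff, of the parser's round trip using the ExampleModel from the commented example above:

# Sketch: schema-derived prompt instructions plus a parse of a fenced JSON block.
# Uses only names from the removed file; the input string is illustrative.
from pydantic import BaseModel


class ExampleModel(BaseModel):
    field1: int
    field2: str


parser = JsonOutputParser(ExampleModel)
print(parser.get_format_instructions())  # JSON formatting instructions built from the model schema
parsed = parser.parse('```json\n{"field1": 1, "field2": "demo"}\n```')
print(parsed.field1, parsed.field2)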
@ -1,90 +0,0 @@
|
|||||||
import json
|
|
||||||
import re
|
|
||||||
from typing import Type, TypeVar
|
|
||||||
|
|
||||||
import yaml
|
|
||||||
from pydantic import BaseModel, ValidationError
|
|
||||||
|
|
||||||
T = TypeVar("T", bound=BaseModel)
|
|
||||||
|
|
||||||
|
|
||||||
class YamlParsingException(Exception):
|
|
||||||
"""Custom exception for errors in YAML parsing."""
|
|
||||||
|
|
||||||
|
|
||||||
class YamlOutputParser:
|
|
||||||
"""Parse YAML output using a Pydantic model.
|
|
||||||
|
|
||||||
This parser is designed to extract YAML formatted data from a given string
|
|
||||||
and parse it using a specified Pydantic model for validation.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
pydantic_object: A Pydantic model class for parsing and validation.
|
|
||||||
pattern: A regex pattern to match YAML code blocks.
|
|
||||||
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
>>> from pydantic import BaseModel
|
|
||||||
>>> from swarms.utils.yaml_output_parser import YamlOutputParser
|
|
||||||
>>> class MyModel(BaseModel):
|
|
||||||
... name: str
|
|
||||||
... age: int
|
|
||||||
...
|
|
||||||
>>> parser = YamlOutputParser(MyModel)
|
|
||||||
>>> text = "```yaml\nname: John\nage: 42\n```"
|
|
||||||
>>> model = parser.parse(text)
|
|
||||||
>>> model.name
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, pydantic_object: Type[T]):
|
|
||||||
self.pydantic_object = pydantic_object
|
|
||||||
self.pattern = re.compile(
|
|
||||||
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
|
|
||||||
)
|
|
||||||
|
|
||||||
def parse(self, text: str) -> T:
|
|
||||||
"""Parse the provided text to extract and validate YAML data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
text: A string containing potential YAML data.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
An instance of the specified Pydantic model with parsed data.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
YamlParsingException: If parsing or validation fails.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
match = re.search(self.pattern, text.strip())
|
|
||||||
yaml_str = match.group("yaml") if match else text
|
|
||||||
|
|
||||||
json_object = yaml.safe_load(yaml_str)
|
|
||||||
return self.pydantic_object.parse_obj(json_object)
|
|
||||||
|
|
||||||
except (yaml.YAMLError, ValidationError) as e:
|
|
||||||
name = self.pydantic_object.__name__
|
|
||||||
msg = (
|
|
||||||
f"Failed to parse {name} from text '{text}'."
|
|
||||||
f" Error: {e}"
|
|
||||||
)
|
|
||||||
raise YamlParsingException(msg) from e
|
|
||||||
|
|
||||||
def get_format_instructions(self) -> str:
|
|
||||||
"""Generate formatting instructions based on the Pydantic model schema.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A string containing formatting instructions.
|
|
||||||
"""
|
|
||||||
schema = self.pydantic_object.schema()
|
|
||||||
reduced_schema = {
|
|
||||||
k: v
|
|
||||||
for k, v in schema.items()
|
|
||||||
if k not in ["title", "type"]
|
|
||||||
}
|
|
||||||
schema_str = json.dumps(reduced_schema, indent=4)
|
|
||||||
|
|
||||||
format_instructions = (
|
|
||||||
f"YAML Formatting Instructions:\n{schema_str}"
|
|
||||||
)
|
|
||||||
return format_instructions
|
|