parent 1b64ef07fb
commit a59a39c43f
@@ -0,0 +1,176 @@
from typing import List

from pydantic import BaseModel, Field

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger
from swarms.models.popular_llms import Anthropic, OpenAIChat
from swarms.models.base_llm import BaseLLM
from swarms.memory.base_vectordb import BaseVectorDatabase

boss_sys_prompt = (
    "You're the Swarm Orchestrator, like a project manager of a"
    " bustling hive. When a task arises, you tap into your network of"
    " worker agents who are ready to jump into action. Whether it's"
    " organizing data, handling logistics, or crunching numbers, you"
    " delegate tasks strategically to maximize efficiency. Picture"
    " yourself as the conductor of a well-oiled machine,"
    " orchestrating the workflow seamlessly to achieve optimal"
    " results with your team of dedicated worker agents."
)


class AgentSchema(BaseModel):
    name: str = Field(
        ...,
        title="Name of the agent",
        description="Name of the agent",
    )
    system_prompt: str = Field(
        ...,
        title="System prompt for the agent",
        description="System prompt for the agent",
    )
    rules: str = Field(
        ...,
        title="Rules",
        description="Rules for the agent",
    )
    llm: str = Field(
        ...,
        title="Language model",
        description="Language model for the agent: `GPT4` or `Claude`",
    )

    # tools: List[ToolSchema] = Field(
    #     ...,
    #     title="Tools available to the agent",
    #     description="Either `browser` or `terminal`",
    # )
    # task: str = Field(
    #     ...,
    #     title="Task assigned to the agent",
    #     description="Task assigned to the agent",
    # )
    # TODO: Add more fields here such as the agent's language model, tools, etc.


class HassSchema(BaseModel):
    plan: str = Field(
        ...,
        title="Plan to solve the input problem",
        description="List of steps to solve the problem",
    )
    agents: List[AgentSchema] = Field(
        ...,
        title="List of agents to use for the problem",
        description="List of agents to use for the problem",
    )
    # Rules for the agents
    rules: str = Field(
        ...,
        title="Rules for the agents",
        description="Rules for the agents",
    )


class HierarchicalSwarm(BaseSwarm):
    def __init__(
        self,
        director: Agent = None,
        subordinates: List[Agent] = [],
        workers: List[Agent] = [],
        director_sys_prompt: str = boss_sys_prompt,
        director_name: str = "Swarm Orchestrator",
        director_agent_creation_schema: BaseModel = HassSchema,
        director_llm: BaseLLM = Anthropic,
        communication_protocol: BaseVectorDatabase = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.director = director
        self.subordinates = subordinates
        self.workers = workers
        self.director_sys_prompt = director_sys_prompt
        self.director_name = director_name
        self.director_agent_creation_schema = (
            director_agent_creation_schema
        )
        self.director_llm = director_llm
        self.communication_protocol = communication_protocol

    def create_director(self, *args, **kwargs):
        """
        Create the director agent based on the provided schema.
        """
        name = self.director_name
        system_prompt = self.director_sys_prompt
        director_llm = self.director_llm

        # Instantiate the language model class and keep the instance
        if director_llm == Anthropic:
            director_llm = Anthropic(*args, **kwargs)
        elif director_llm == OpenAIChat:
            director_llm = OpenAIChat(*args, **kwargs)

        logger.info(
            f"Creating Director Agent: {name} with system prompt:"
            f" {system_prompt}"
        )

        director = Agent(
            agent_name=name,
            system_prompt=system_prompt,
            llm=director_llm,
            max_loops=1,
            autosave=True,
            dashboard=False,
            verbose=True,
            stopping_token="<DONE>",
        )

        return director

    def create_worker_agents(
        self,
        agents: List[AgentSchema],
    ) -> List[Agent]:
        """
        Create and initialize agents based on the provided AgentSchema objects.

        Args:
            agents (List[AgentSchema]): A list of AgentSchema objects containing agent information.

        Returns:
            List[Agent]: The initialized Agent objects.
        """
        agent_list = []
        for agent in agents:
            name = agent.name
            system_prompt = agent.system_prompt

            logger.info(
                f"Creating agent: {name} with system prompt:"
                f" {system_prompt}"
            )

            out = Agent(
                agent_name=name,
                system_prompt=system_prompt,
                # llm=Anthropic(
                #     anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")
                # ),
                max_loops=1,
                autosave=True,
                dashboard=False,
                verbose=True,
                stopping_token="<DONE>",
            )

            # network.add_agent(out)
            agent_list.append(out)

        return agent_list
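A minimal usage sketch of the class above (not part of the committed file): it assumes BaseSwarm needs no required constructor arguments and that Anthropic() can read its API key from the environment; the worker details are illustrative.

# Hypothetical usage of HierarchicalSwarm
swarm = HierarchicalSwarm(director_llm=Anthropic)
director = swarm.create_director()

workers = swarm.create_worker_agents(
    [
        AgentSchema(
            name="Researcher",
            system_prompt="You research and summarize sources.",
            rules="Cite every claim you make.",
            llm="Claude",
        )
    ]
)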
@@ -1,98 +0,0 @@
swarms

pip install swarms

swarms is the most pythonic way of writing cognitive systems. Leveraging pydantic models as output schemas, combined with langchain in the backend, allows for seamless integration of LLMs into your apps. It uses OpenAI Functions or LlamaCpp grammars (json-schema mode) for efficient structured output, and it compiles the swarms syntax into langchain runnables so you can easily invoke, stream, or batch-process your pipelines.

from pydantic import BaseModel, Field
from swarms import Anthropic
from swarms import Agent


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Instantiate the schema
tool_schema = Schema(
    name="Tool Name",
    age=1,
    is_student=True,
    courses=["Course1", "Course2"],
)

# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"

# Initialize the agent
agent = Agent(
    agent_name="Person Information Generator",
    system_prompt=(
        "Generate a person's information based on the following schema:"
    ),
    # Set the tool schema -- this is the key difference
    tool_schema=tool_schema,
    llm=Anthropic(),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    interactive=True,
    # Set the output type to the tool schema, which is a BaseModel
    output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_tool_schemas=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")


Features
🐍 pythonic
🔀 easy swap between openai or local models
🔄 dynamic output types (pydantic models, or primitives)
👁️ vision llm support
🧠 langchain_core as backend
📝 jinja templating for prompts
🏗️ reliable structured output
🔁 auto retry parsing
🔧 langsmith support
🔄 sync, async, streaming, parallel, fallbacks
📦 gguf download from huggingface
✅ type hints for all functions and mypy support
🗣️ chat router component
🧩 composable with langchain LCEL
🛠️ easy error handling
🚦 enums and literal support
📐 custom parsing types

Documentation
Check out the docs here 👈

We also highly recommend trying out and running the examples in the ./examples folder.

Contribution
You want to contribute? Thanks, that's great! For more information, check out the Contributing Guide. Please run the dev setup to get started:

git clone https://github.com/kyegomez/swarms.git && cd swarms

./dev_setup.sh

About
⛓️ build cognitive systems, pythonic
@@ -1,29 +1,31 @@
 from swarms import Agent
-from swarms.models.base_llm import AbstractLLM
+from swarms.models.base_llm import BaseLLM


-class ExampleLLM(AbstractLLM):
-    def __init__():
+# Define a custom LLM class
+class ExampleLLM(BaseLLM):
+    def __init__(self):
         pass

     def run(self, task: str, *args, **kwargs):
+        # Your LLM logic here
         pass


-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
-    llm=ExampleLLM(),
-    max_loops="auto",
-    autosave=True,
-    dashboard=False,
-    streaming_on=True,
-    verbose=True,
-    stopping_token="<DONE>",
-    interactive=True,
+    llm=ExampleLLM(),  # Instantiate the ExampleLLM class
+    max_loops="auto",  # Set the maximum number of loops to "auto"
+    autosave=True,  # Enable autosave feature
+    dashboard=False,  # Disable the dashboard
+    streaming_on=True,  # Enable streaming
+    verbose=True,  # Enable verbose mode
+    stopping_token="<DONE>",  # Set the stopping token to "<DONE>"
+    interactive=True,  # Enable interactive mode
 )

 # Run the workflow on a task
 agent(
-    "Generate a transcript for a youtube video on what swarms are!"
-    " Output a <DONE> token when done."
+    "Generate a transcript for a youtube video on what swarms are!"  # Specify the task
+    " Output a <DONE> token when done."  # Specify the stopping condition
 )
@@ -1,48 +0,0 @@
from swarms import Agent, OpenAI
from swarms.structs.groupchat import GroupChat, GroupChatManager

api_key = ""

llm = OpenAI(
    openai_api_key=api_key,
    temperature=0.5,
    max_tokens=3000,
)

# Initialize the agents
flow1 = Agent(
    llm=llm,
    max_loops=1,
    system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
    name="silly",
    dashboard=True,
)
flow2 = Agent(
    llm=llm,
    max_loops=1,
    system_message="YOU ARE VERY SMART AND ANSWER RIDDLES",
    name="detective",
    dashboard=True,
)
flow3 = Agent(
    llm=llm,
    max_loops=1,
    system_message="YOU MAKE RIDDLES",
    name="riddler",
    dashboard=True,
)
manager = Agent(
    llm=llm,
    max_loops=1,
    system_message="YOU ARE A GROUP CHAT MANAGER",
    name="manager",
    dashboard=True,
)


# Example usage:
agents = [flow1, flow2, flow3]

group_chat = GroupChat(agents=agents, messages=[], max_round=10)
chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
chat_history = chat_manager("Write me a riddle")
@@ -1,18 +0,0 @@
import os

from dotenv import load_dotenv

from swarms import OpenAIChat, Agent

# Load environment variables
load_dotenv()

# Create a chat instance
llm = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
)

# Create an agent
agent = Agent(
    agent_name="GPT-3",
    llm=llm,
)
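The example stops after constructing the agent; a minimal hedged follow-up, reusing the agent.run interface shown in the other examples in this diff (the prompt is illustrative):

# Run the agent on a task
out = agent.run("Summarize what agent swarms are in two sentences.")
print(out)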
@@ -0,0 +1,39 @@
from typing import Annotated

from openai import OpenAI

from swarms import create_openai_tool

# Create an instance of the OpenAI client
client = OpenAI()

# Define the user messages for the chat conversation
messages = [
    {
        "role": "user",
        "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
    }
]


# Define the BMI calculator tool using the create_openai_tool decorator
@create_openai_tool(
    name="BMI Calculator",
    description="Calculate the Body Mass Index (BMI)",
)
def calculate_bmi(
    weight: Annotated[float, "Weight in kilograms"],
    height: Annotated[float, "Height in meters"],
) -> Annotated[float, "Body Mass Index"]:
    """Calculate the Body Mass Index (BMI) given a person's weight and height."""
    return weight / (height**2)


# Create a chat completion request using the OpenAI client.
# The decorated function returns its OpenAI schema when invoked,
# and the API expects a list of tool schemas.
response = client.chat.completions.create(
    model="gpt-3.5-turbo-0125",
    messages=messages,
    tools=[calculate_bmi(weight=70.0, height=1.75)],
    tool_choice="auto",  # auto is default, but we'll be explicit
)

# Print the generated response from the chat completion
print(response.choices[0].message.content)
@@ -1,97 +0,0 @@
import os
import subprocess
import tempfile


class CodeExecutor:
    """
    A class for executing code snippets.

    Args:
        code (str, optional): The code snippet to be executed. Defaults to None.

    Methods:
        run_python(code: str = None) -> str:
            Executes the given Python code and returns the output.

        run(code: str = None) -> str:
            Executes the given code and returns the output.

        __call__(task: str) -> str:
            Executes the code and returns the output.
    """

    def __init__(self):
        self.code = None

    def run_python(self, code: str = None) -> str:
        """
        Executes the given Python code and returns the output.

        Args:
            code (str, optional): The Python code to be executed. Defaults to None.

        Returns:
            str: The output of the code execution.
        """
        code = code or self.code
        try:
            # Create a temporary file
            with tempfile.NamedTemporaryFile(
                suffix=".py", delete=False
            ) as temp:
                temp.write(code.encode())
                temp_filename = temp.name

            # Execute the temporary file
            output = subprocess.check_output(
                f"python {temp_filename}",
                shell=True,
            )

            # Delete the temporary file
            os.remove(temp_filename)

            return output.decode("utf-8")
        except subprocess.CalledProcessError as error:
            return error.output.decode("utf-8")
        except Exception as error:
            return str(error)

    def run(self, code: str = None) -> str:
        """
        Executes the given code as a shell command and returns the output.

        Args:
            code (str, optional): The code to be executed. Defaults to None.

        Returns:
            str: The output of the code execution.
        """
        try:
            output = subprocess.check_output(
                code,
                shell=True,
            )
            return output.decode("utf-8")
        except subprocess.CalledProcessError as e:
            return e.output.decode("utf-8")
        except Exception as e:
            return str(e)

    def __call__(self, task: str, *args, **kwargs) -> str:
        """
        Executes the code and returns the output.

        Returns:
            str: The output of the code execution.
        """
        return self.run(task)


# model = CodeExecutor()
# out = model.run("python3")
# print(out)
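A short usage sketch of the class above, in the spirit of the commented lines at the end:

# Execute a Python snippet, then a shell command
executor = CodeExecutor()
print(executor.run_python("print(2 + 2)"))  # -> "4\n"
print(executor.run("echo hello"))  # -> "hello\n"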
@@ -1,112 +0,0 @@
import logging
import os
import subprocess
import tempfile
import traceback
from typing import Tuple


async def execute_code_async(code: str) -> Tuple[str, str]:
    """
    This function takes a string of code as input, adds some documentation to it,
    and then attempts to execute the code asynchronously. If the code execution is
    successful, the function returns the new code and an empty string. If the code
    execution fails, the function returns the new code and the error message.

    Args:
        code (str): The original code.

    Returns:
        Tuple[str, str]: The new code with added documentation and the error message (if any).
    """

    # Validate the input
    if not isinstance(code, str):
        raise ValueError("The code must be a string.")

    # Add some documentation to the code
    documentation = """
    '''
    This code has been prepared for deployment in an execution sandbox.
    '''
    """

    # Combine the documentation and the original code
    new_code = documentation + "\n" + code

    # Attempt to execute the code
    error_message = ""
    try:
        # Use a secure environment to execute the code (e.g., a Docker container)
        # This is just a placeholder and would require additional setup and dependencies
        # exec_in_docker(new_code)
        exec(new_code)
        logging.info("Code executed successfully.")
    except Exception:
        error_message = traceback.format_exc()
        logging.error("Code execution failed. Error: %s", error_message)

    # Return the new code and the error message
    return new_code, error_message


def execute_code_in_sandbox(code: str, language: str = "python"):
    """
    Execute code in a specified language using subprocess and return the results or errors.

    Args:
        code (str): The code to be executed.
        language (str): The programming language of the code. Currently supports 'python' only.

    Returns:
        dict: A dictionary containing either the result or any errors.
    """
    result = {"output": None, "errors": None}

    try:
        if language == "python":
            # Write the code to a temporary file
            with tempfile.NamedTemporaryFile(
                delete=False, suffix=".py", mode="w"
            ) as tmp:
                tmp.write(code)
                tmp_path = tmp.name

            # Execute the code in a separate process
            process = subprocess.run(
                ["python", tmp_path],
                capture_output=True,
                text=True,
                timeout=10,
            )

            # Capture the output and errors
            result["output"] = process.stdout
            result["errors"] = process.stderr

        else:
            # Placeholder for other languages; each would need its own implementation
            raise NotImplementedError(
                f"Execution for {language} not implemented."
            )

    except subprocess.TimeoutExpired:
        result["errors"] = "Execution timed out."
    except Exception as e:
        result["errors"] = str(e)
    finally:
        # Ensure the temporary file is removed after execution
        if "tmp_path" in locals():
            os.remove(tmp_path)

    return result


# # Example usage
# code_to_execute = """
# print("Hello, world!")
# """

# execution_result = execute_code_in_sandbox(code_to_execute)
# print(json.dumps(execution_result, indent=4))
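To make the commented example concrete, a minimal sketch of calling execute_code_in_sandbox directly:

# Run a snippet and inspect the result dictionary
result = execute_code_in_sandbox('print("Hello, world!")')
print(result["output"])  # "Hello, world!\n"
print(result["errors"])  # "" when execution succeeds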
@@ -1,40 +0,0 @@
import asyncio
import concurrent.futures
from inspect import iscoroutinefunction
from typing import Any, Callable, Dict, List


# Helper function to run an asynchronous function in a synchronous way
def run_async_function_in_sync(func: Callable, *args, **kwargs) -> Any:
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        coroutine = func(*args, **kwargs)
        return loop.run_until_complete(coroutine)
    finally:
        loop.close()


# Main omni function for parallel execution
def omni_parallel_function_caller(
    function_calls: List[Dict[str, Any]]
) -> List[Any]:
    results = []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_call = {}
        for call in function_calls:
            func = call["function"]
            args = call.get("args", ())
            kwargs = call.get("kwargs", {})

            if iscoroutinefunction(func):
                # Wrap and execute the asynchronous function in a separate thread
                future = executor.submit(
                    run_async_function_in_sync, func, *args, **kwargs
                )
            else:
                # Directly execute the synchronous function in a thread
                future = executor.submit(func, *args, **kwargs)

            future_to_call[future] = call

        for future in concurrent.futures.as_completed(future_to_call):
            results.append(future.result())
    return results
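A minimal usage sketch for the caller above, mixing one synchronous and one asynchronous function (the function names are illustrative):

import asyncio

def add(a, b):
    return a + b

async def async_mul(a, b):
    await asyncio.sleep(0)  # stand-in for real async work
    return a * b

calls = [
    {"function": add, "args": (2, 3)},
    {"function": async_mul, "kwargs": {"a": 4, "b": 5}},
]
# Results arrive in completion order, so [5, 20] or [20, 5]
print(omni_parallel_function_caller(calls))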
@@ -0,0 +1,81 @@
from functools import wraps

from swarms.tools.py_func_to_openai_func_str import (
    get_openai_function_schema_from_func,
)
from swarms.utils.loguru_logger import logger


def create_openai_tool(
    name: str = None,
    description: str = None,
    return_dict: bool = True,
    verbose: bool = True,
    return_string: bool = False,
    return_yaml: bool = False,
):
    """
    A decorator function that generates an OpenAI function schema.

    Args:
        name (str, optional): The name of the OpenAI function. Defaults to None.
        description (str, optional): The description of the OpenAI function. Defaults to None.
        return_dict (bool, optional): Whether to return the schema as a dict. Defaults to True.
        verbose (bool, optional): Whether to log verbosely. Defaults to True.
        return_string (bool, optional): Whether to return the schema as a string. Defaults to False.
        return_yaml (bool, optional): Whether to return the schema as YAML. Defaults to False.

    Returns:
        dict: The generated OpenAI function schema.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                # Log the function call
                logger.info(f"Creating Tool: {func.__name__}")

                # Assert that the arguments are of the correct type
                assert isinstance(name, str), "name must be a string"
                assert isinstance(
                    description, str
                ), "description must be a string"
                assert isinstance(
                    return_dict, bool
                ), "return_dict must be a boolean"
                assert isinstance(
                    verbose, bool
                ), "verbose must be a boolean"

                # Call the function
                func(*args, **kwargs)

                # Get the openai function schema
                schema = get_openai_function_schema_from_func(
                    func, name=name, description=description
                )

                # Return the schema
                if return_dict:
                    return schema
                elif return_string is True:
                    return str(schema)
                elif return_yaml is True:
                    # schema = YamlModel().dict_to_yaml(schema)
                    return schema
                else:
                    return schema

            except AssertionError as e:
                # Log the assertion error
                logger.error(f"Assertion error: {str(e)}")
                raise

            except Exception as e:
                # Log the exception
                logger.error(f"Exception occurred: {str(e)}")
                raise

        return wrapper

    return decorator
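A hedged usage sketch of the decorator above, mirroring the BMI example earlier in this diff. Note the design choice it exposes: the wrapper runs the function and only then returns the schema, so you obtain the schema by invoking the decorated function; the exact schema layout depends on get_openai_function_schema_from_func.

# Hypothetical tool definition
@create_openai_tool(
    name="Adder",
    description="Add two numbers",
)
def add(a: float, b: float) -> float:
    """Add two numbers."""
    return a + b

# Calling the decorated function executes it, then returns the schema dict
schema = add(1.0, 2.0)
print(schema)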