Output readability improvement

pull/542/head
Nicolas Nahas 9 months ago
parent f602722924
commit 33ee2e5a40

@ -1,10 +1,28 @@
"""
* WORKING
What this script does:
Structured output example
Requirements:
Add the folowing API key(s) in your .env file:
- OPENAI_API_KEY (this example works best with Openai bc it uses openai function calling structure)
Note:
If you are running playground examples in the project files directly (without swarms installed via PIP),
make sure to add the project root to your PYTHONPATH by running the following command in the project's root directory:
'export PYTHONPATH=$(pwd):$PYTHONPATH'
"""
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from swarms import OpenAIChat from swarms import Agent, OpenAIChat
from swarms import Agent
# Initialize the schema for the person's information # Initialize the schema for the person's information
class Schema(BaseModel): class Schema(BaseModel):
"""
This is a pydantic class describing the format of a structured output
"""
name: str = Field(..., title="Name of the person") name: str = Field(..., title="Name of the person")
agent: int = Field(..., title="Age of the person") agent: int = Field(..., title="Age of the person")
is_student: bool = Field(..., title="Whether the person is a student") is_student: bool = Field(..., title="Whether the person is a student")
@ -12,15 +30,6 @@ class Schema(BaseModel):
..., title="List of courses the person is taking" ..., title="List of courses the person is taking"
) )
# Convert the schema to a JSON string
tool_schema = Schema(
name="Tool Name",
agent=1,
is_student=True,
courses=["Course1", "Course2"],
)
# Define the task to generate a person's information # Define the task to generate a person's information
task = "Generate a person's information based on the following schema:" task = "Generate a person's information based on the following schema:"
@ -30,22 +39,13 @@ agent = Agent(
system_prompt=( system_prompt=(
"Generate a person's information based on the following schema:" "Generate a person's information based on the following schema:"
), ),
# Set the tool schema to the JSON string -- this is the key difference
# tool_schema=tool_schema,
llm=OpenAIChat(), llm=OpenAIChat(),
max_loops=3, max_loops=1,
autosave=True,
dashboard=False,
streaming_on=True, streaming_on=True,
verbose=True, verbose=True,
interactive=True,
# Set the output type to the tool schema which is a BaseModel
# output_type=tool_schema, # or dict, or str
metadata_output_type="json",
# List of schemas that the agent can handle # List of schemas that the agent can handle
list_base_models=[tool_schema], list_base_models=[Schema],
function_calling_format_type="OpenAI", agent_ops_on=True
function_calling_type="json", # or soon yaml
) )
# Run the agent to generate the person's information # Run the agent to generate the person's information

@ -671,6 +671,10 @@ class Agent(BaseStructure):
########################## FUNCTION CALLING ########################## ########################## FUNCTION CALLING ##########################
def response_readability(self, response) -> str:
    """Format a response for readable console output.

    Prefixes the response with the agent's name in bold magenta capitals,
    then renders the response text in blue, using ANSI SGR escape codes.

    Args:
        response: The raw response text to display.

    Returns:
        str: The ANSI-colored, name-prefixed display string.
    """
    # Use `self.agent_name` for consistency with the rest of the class
    # (e.g. `short_memory.add(role=self.agent_name, ...)`); the original
    # `self.name` attribute does not appear anywhere else in this class.
    return f"\033[1;35m{self.agent_name.upper()}:\033[0m \033[34m{response}\033[0m"
def run( def run(
self, self,
task: Optional[str] = None, task: Optional[str] = None,
@ -726,7 +730,7 @@ class Agent(BaseStructure):
response = self.llm( response = self.llm(
task_prompt, *args, **kwargs task_prompt, *args, **kwargs
) )
print(response) print(self.response_readability(response))
self.short_memory.add( self.short_memory.add(
role=self.agent_name, content=response role=self.agent_name, content=response
@ -746,7 +750,7 @@ class Agent(BaseStructure):
if img is None if img is None
else (task_prompt, img, *args) else (task_prompt, img, *args)
) )
response = self.llm(*response_args, **kwargs) response = self.response_readability(self.llm(*response_args, **kwargs))
# Print # Print
if self.streaming_on is True: if self.streaming_on is True:

Loading…
Cancel
Save