parent b9cd77fbdd
commit 61790fc40d
@@ -1,68 +0,0 @@
from pydantic import BaseModel, Field

from swarms import Agent
from swarms.models.popular_llms import Anthropic
from swarms.tools.openai_tool_creator_decorator import tool


# Define the search API tool
@tool
def search_api(query: str) -> str:
    """
    This tool searches the web for information about COVID-19 symptoms.
    """
    return f"Search API tool called with query: {query}"


print(search_api("COVID-19 symptoms"))


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Instantiate the schema with example values (the agent converts it to a JSON schema)
tool_schema = Schema(
    name="Tool Name",
    age=1,
    is_student=True,
    courses=["Course1", "Course2"],
)

# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"

# Initialize the agent
agent = Agent(
    agent_name="Person Information Generator",
    # Set the tool schema -- this is the key difference
    tool_schema=tool_schema,
    llm=Anthropic(),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    tools=[],  # or a list of tools
    verbose=True,
    interactive=True,
    # Set the output type to the tool schema, which is a BaseModel
    output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_tool_schemas=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
    execute_tool=True,
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")
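For context on the structured-output pattern in the deleted example: the pydantic model serves both as the schema handed to the agent and as a validator for what comes back. A minimal sketch of that round trip using only pydantic (the model_json_schema and model_validate_json calls assume pydantic v2; the sample JSON string is made up for illustration):

from pydantic import BaseModel, Field


class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(..., title="List of courses the person is taking")


# JSON schema that an agent could be prompted with (pydantic v2 API)
print(Schema.model_json_schema())

# Validate a hypothetical model response back into the schema
raw = '{"name": "Alice", "age": 21, "is_student": true, "courses": ["Math"]}'
person = Schema.model_validate_json(raw)
print(person.courses)  # ['Math']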
@@ -1,11 +1,35 @@
-from swarms.models.gpt4v import GPT4Vision
+import os  # Import the os module for working with the operating system
+
+from dotenv import (
+    load_dotenv,  # Import the load_dotenv function from the dotenv module
+)
+from swarms import (
+    GPT4VisionAPI,  # Import the GPT4VisionAPI class from the swarms module
+)

-gpt4vision = GPT4Vision(openai_api_key="")
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment variables
+api_key = os.getenv("OPENAI_API_KEY")

-img = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0d/VFPt_Solenoid_correct2.svg/640px-VFPt_Solenoid_correct2.svg.png"
+# Create an instance of the GPT4VisionAPI class with the API key and model name
+gpt4vision = GPT4VisionAPI(
+    openai_api_key=api_key,
+    model_name="gpt-4o",
+    max_tokens=1000,
+    openai_proxy="https://api.openai.com/v1/chat/completions",
+)
+
+# Define the path of the local image to analyze
+img = "ear.png"
+
+# Define the task to perform on the image
+task = "What is this image"

-answer = gpt4vision.run(task, img)
+# Run the GPT4VisionAPI on the image with the specified task
+answer = gpt4vision.run(task, img, return_json=True)

 # Print the answer
 print(answer)
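As an aside, the updated example depends on load_dotenv() finding the key in a local .env file or in the environment; a minimal sketch of failing fast when it is missing (the error message is illustrative):

import os

from dotenv import load_dotenv

# load_dotenv() reads key=value pairs from a .env file in the working directory,
# e.g. a line such as OPENAI_API_KEY=<your key>
load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY is not set; add it to .env or export it")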
@@ -0,0 +1,31 @@
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()

openai_api_key = os.getenv("SWARMS_API_KEY")
openai_api_base = "https://api.swarms.world"
model = "gpt-4o"

client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)

# Note that this model expects the image to come before the main text
chat_response = client.chat.completions.create(
    model=model,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://home-cdn.reolink.us/wp-content/uploads/2022/04/010345091648784709.4253.jpg",
                    },
                },
                {"type": "text", "text": "What's in this image?"},
            ],
        }
    ],
    temperature=0.1,
)
print("Chat response:", chat_response)
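If the image is a local file (like the ear.png used with GPT4VisionAPI above) rather than a public URL, the same OpenAI-compatible client can be given a base64 data URL instead. A sketch reusing the client and model defined above, assuming the swarms.world endpoint accepts data URLs the way the upstream OpenAI chat completions API does:

import base64

# Encode the local image as a data URL (assumed to be accepted by the endpoint)
with open("ear.png", "rb") as f:
    encoded_image = base64.b64encode(f.read()).decode("utf-8")

chat_response = client.chat.completions.create(
    model=model,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{encoded_image}"},
                },
                {"type": "text", "text": "What's in this image?"},
            ],
        }
    ],
    temperature=0.1,
)

# The reply text is in the first choice's message content
print(chat_response.choices[0].message.content)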
File diff suppressed because it is too large