[EXAMPLES][Jamba ++ Command R Tool Agent]

pull/440/head
Kye 9 months ago
parent fefc884477
commit a86f5979af

@@ -0,0 +1,59 @@
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json

# Model name
model_name = "CohereForAI/c4ai-command-r-v01-4bit"

# Load the pre-trained model
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
)

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Define the schema for an example API request
class APIExampleRequestSchema(BaseModel):
    endpoint: str = Field(
        ..., description="The API endpoint for the example request"
    )
    method: str = Field(
        ..., description="The HTTP method for the example request"
    )
    headers: dict = Field(
        ..., description="The headers for the example request"
    )
    body: dict = Field(
        ..., description="The body of the example request"
    )
    response: dict = Field(
        ..., description="The expected response of the example request"
    )


# Convert the schema to a JSON string
api_example_schema = base_model_to_json(APIExampleRequestSchema)

# Define the task: ask the model to generate an example API request
task = (
    "Generate an example API request using this code:\n" + api_example_schema
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="Command R Tool Agent",
    description=(
        "An agent that generates an API request using the Command R model."
    ),
    model=model,
    tokenizer=tokenizer,
    json_schema=api_example_schema,
)

# Run the agent to generate the example API request
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")

@@ -0,0 +1,59 @@
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json

# Model name
model_name = "ai21labs/Jamba-v0.1"

# Load the pre-trained model
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
)

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Define the schema for an example API request
class APIExampleRequestSchema(BaseModel):
    endpoint: str = Field(
        ..., description="The API endpoint for the example request"
    )
    method: str = Field(
        ..., description="The HTTP method for the example request"
    )
    headers: dict = Field(
        ..., description="The headers for the example request"
    )
    body: dict = Field(
        ..., description="The body of the example request"
    )
    response: dict = Field(
        ..., description="The expected response of the example request"
    )


# Convert the schema to a JSON string
api_example_schema = base_model_to_json(APIExampleRequestSchema)

# Define the task: ask the model to generate an example API request
task = (
    "Generate an example API request using this code:\n" + api_example_schema
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="Jamba Tool Agent",
    description=(
        "An agent that generates an API request using the Jamba model."
    ),
    model=model,
    tokenizer=tokenizer,
    json_schema=api_example_schema,
)

# Run the agent to generate the example API request
generated_data = agent(task)

# Print the generated data
print(f"Generated data: {generated_data}")

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "4.7.4"
version = "4.7.8"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@@ -44,6 +44,8 @@ Pillow = "10.2.0"
rich = "13.5.2"
psutil = "*"
sentry-sdk = "*"
python-dotenv = "*"
accelerate = "0.28.0"
[tool.poetry.dev-dependencies]
black = "23.3.0"

@@ -40,7 +40,7 @@ from swarms.utils.remove_json_whitespace import (
remove_whitespace_from_yaml,
)
from swarms.utils.save_logs import parse_log_file
from swarms.utils.supervision_visualizer import MarkVisualizer
# from swarms.utils.supervision_visualizer import MarkVisualizer
from swarms.utils.try_except_wrapper import try_except_wrapper
from swarms.utils.yaml_output_parser import YamlOutputParser
from swarms.utils.concurrent_utils import execute_concurrently
@@ -77,8 +77,6 @@ __all__ = [
"remove_whitespace_from_json",
"remove_whitespace_from_yaml",
"parse_log_file",
"MarkVisualizer",
# "limit_tokens_from_string",
"try_except_wrapper",
"YamlOutputParser",
"execute_concurrently",
