[REFACTOR][Agent]

pull/426/head
Kye 10 months ago
parent ab90a405c8
commit b3dc64cae6

@@ -0,0 +1,17 @@
{
"agent_id": "<function agent_id at 0x16368c400>",
"agent_name": "Transcript Generator",
"agent_description": "Generate a transcript for a youtube video on what swarms are!",
"system_prompt": "\n You are a fully autonomous agent serving the user in automating tasks, workflows, and activities. \n Agent's use custom instructions, capabilities, and data to optimize LLMs for a more narrow set of tasks.\n \n You will have internal dialogues with yourself and or interact with the user to aid in these tasks. \n Your responses should be coherent, contextually relevant, and tailored to the task at hand.\n",
"sop": null,
"short_memory": "system: \n You are a fully autonomous agent serving the user in automating tasks, workflows, and activities. \n Agent's use custom instructions, capabilities, and data to optimize LLMs for a more narrow set of tasks.\n \n You will have internal dialogues with yourself and or interact with the user to aid in these tasks. \n Your responses should be coherent, contextually relevant, and tailored to the task at hand.\n\n\n\nHuman:: Generate a transcript for a youtube video on what swarms are!\n\n\nTranscript Generator: \nSwarms are composed of large numbers of independent individuals that collectively carry out complex behaviors. For example, an ant colony functions as a swarm - each ant follows simple rules but together the colony can build intricate nests and find food.\n\nIn artificial swarms, we try to emulate these naturally-occurring phenomena. By programming basic behaviors into agents and allowing them to interact, we can observe emergent group behaviors without centralized control. For example, groups of robots may be designed with attraction and repulsion forces to self-assemble or explore environments.\n\nSimilarly, swarms may allow optimization algorithms to explore solutions in parallel. Each program follows their own trajectory while sharing information to converge on the best result. High-level commands give a rough direction, but the specific behaviors emerge from the interactions at the local level. \n\nPotential applications of artificial swarms include self-configuring robot teams for search & rescue, intelligent routing of network packets, and distributed processing for enhanced efficiency. The decentralized nature of swarms provides robustness, scalability and adaptability surpassing individual agents. \n\nBy harnessing simple local rules and interactions, swarm systems transcend the capabilities of any single member. They provide distributed solutions to coordinate large numbers independent agents to achieve a collective purpose.\n\n\nTranscript Generator: \nSwarms are composed of large numbers of independent individuals that collectively carry out complex behaviors. For example, an ant colony functions as a swarm - each ant follows simple rules but together the colony can build intricate nests and find food.\n\nIn artificial swarms, we try to emulate these naturally-occurring phenomena. By programming basic behaviors into agents and allowing them to interact, we can observe emergent group behaviors without centralized control. For example, groups of robots may be designed with attraction and repulsion forces to self-assemble or explore environments.\n\nSimilarly, swarms may allow optimization algorithms to explore solutions in parallel. Each program follows their own trajectory while sharing information to converge on the best result. High-level commands give a rough direction, but the specific behaviors emerge from the interactions at the local level. \n\nPotential applications of artificial swarms include self-configuring robot teams for search & rescue, intelligent routing of network packets, and distributed processing for enhanced efficiency. The decentralized nature of swarms provides robustness, scalability and adaptability surpassing individual agents. \n\nBy harnessing simple local rules and interactions, swarm systems transcend the capabilities of any single member. They provide distributed solutions to coordinate large numbers independent agents to achieve a collective purpose.\n\n\nHuman:: what is your purpose\n\n",
"loop_interval": 1,
"retry_attempts": 3,
"retry_interval": 1,
"interactive": true,
"dashboard": false,
"dynamic_temperature": false,
"autosave": true,
"saved_state_path": "Transcript Generator_state.json",
"max_loops": 1
}

@@ -9,16 +9,17 @@ agent = Agent(
         " are!"
     ),
     llm=Anthropic(),
-    max_loops="auto",
+    max_loops=3,
     autosave=True,
     dashboard=False,
     streaming_on=True,
     verbose=True,
     stopping_token="<DONE>",
+    interactive=True,
 )

 # Run the workflow on a task
-agent(
+out = agent(
     "Generate a transcript for a youtube video on what swarms are!"
+    " Output a <DONE> token when done."
 )
+print(out)
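
Reassembled from the new side of this hunk, the example runs end to end as sketched below; the import line and the agent_name/agent_description values are assumptions, filled in from the saved state file above.

# Assembled sketch; the import path and the name/description kwargs
# are assumptions, since the hunk shows only the changed region.
from swarms import Agent, Anthropic

agent = Agent(
    agent_name="Transcript Generator",
    agent_description=(
        "Generate a transcript for a youtube video on what swarms"
        " are!"
    ),
    llm=Anthropic(),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
)

out = agent(
    "Generate a transcript for a youtube video on what swarms are!"
    " Output a <DONE> token when done."
)
print(out)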

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "4.3.7"
+version = "4.5.8"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

@@ -33,8 +33,7 @@ google-generativeai = "0.3.1"
 langchain = "0.1.13"
 langchain-core = "0.1.33"
 langchain-community = "0.0.29"
-langsmith = "0.1.17"
-langchain-openai = "0.0.5"
+langchain-experimental = "0.0.55"
 faiss-cpu = "1.7.4"
 backoff = "2.2.1"
 datasets = "*"

@@ -42,7 +41,7 @@ optimum = "1.15.0"
 supervision = "0.19.0"
 opencv-python = "4.9.0.80"
 diffusers = "*"
-anthropic = "0.2.5"
+anthropic = "0.21.3"
 toml = "*"
 pypdf = "4.1.0"
 accelerate = "*"

@@ -64,7 +63,6 @@ sentence-transformers = "*"
 peft = "*"
 psutil = "*"
 timm = "*"
-supervision = "*"
 sentry-sdk = "*"

 [tool.poetry.dev-dependencies]

@@ -2,6 +2,7 @@ from typing import Any, Optional, Callable

 from swarms.structs.agent import Agent
 from swarms.tools.format_tools import Jsonformer
+from swarms.utils.loguru_logger import logger


 class ToolAgent(Agent):
@@ -68,13 +69,14 @@ class ToolAgent(Agent):
         json_schema: Any = None,
         max_number_tokens: int = 500,
         parsing_function: Optional[Callable] = None,
+        llm: Any = None,
         *args,
         **kwargs,
     ):
         super().__init__(
             agent_name=name,
             agent_description=description,
-            sop=f"{name} {description} {str(json_schema)}" * args,
+            llm=llm,
             **kwargs,
         )
         self.name = name
@@ -101,10 +103,13 @@ class ToolAgent(Agent):
             Exception: If an error occurs during the execution of the tool agent.
         """
         try:
+            if self.model:
+                logger.info(f"Running {self.name} for task: {task}")
                 self.toolagent = Jsonformer(
                     model=self.model,
                     tokenizer=self.tokenizer,
                     json_schema=self.json_schema,
+                    llm=self.llm,
                     prompt=task,
                     max_number_tokens=self.max_number_tokens,
                     *args,
@@ -117,17 +122,32 @@ class ToolAgent(Agent):
                 out = self.toolagent()
                 return out
-        except Exception as error:
-            print(f"[Error] [ToolAgent] {error}")
-            raise error
-
-    def __call__(self, task: str, *args, **kwargs):
-        """Call self as a function.
-
-        Args:
-            task (str): _description_
-
-        Returns:
-            _type_: _description_
-        """
-        return self.run(task, *args, **kwargs)
+            elif self.llm:
+                logger.info(f"Running {self.name} for task: {task}")
+                self.toolagent = Jsonformer(
+                    json_schema=self.json_schema,
+                    llm=self.llm,
+                    prompt=task,
+                    max_number_tokens=self.max_number_tokens,
+                    *args,
+                    **kwargs,
+                )
+
+                if self.parsing_function:
+                    out = self.parsing_function(self.toolagent())
+                else:
+                    out = self.toolagent()
+
+                return out
+
+            else:
+                raise Exception(
+                    "Either model or llm should be provided to the"
+                    " ToolAgent"
+                )
+        except Exception as error:
+            logger.error(
+                f"Error running {self.name} for task: {task}"
+            )
+            raise error
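
For contrast with the new llm branch, the surviving model/tokenizer branch is still driven the old way. A sketch, where the checkpoint name and schema are illustrative only:

# Sketch of the self.model branch, assuming a local HuggingFace model;
# the checkpoint name and schema are illustrative, not prescribed here.
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent

model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b")

json_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "number"},
        "courses": {"type": "array", "items": {"type": "string"}},
    },
}

agent = ToolAgent(
    name="dolly-function-agent",
    description="Generates schema-conforming JSON",
    model=model,
    tokenizer=tokenizer,
    json_schema=json_schema,
)
print(agent.run("Generate a person's information."))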

@@ -1,4 +1,4 @@
-from abc import ABC, abstractmethod
+from abc import ABC


 class AbstractVectorDatabase(ABC):

@@ -12,7 +12,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def connect(self):
         """
         Connect to the database.

@@ -21,7 +20,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def close(self):
         """
         Close the database connection.

@@ -30,7 +28,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def query(self, query: str):
         """
         Execute a database query.

@@ -42,7 +39,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def fetch_all(self):
         """
         Fetch all rows from the result set.

@@ -54,7 +50,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def fetch_one(self):
         """
         Fetch one row from the result set.

@@ -66,7 +61,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def add(self, doc: str):
         """
         Add a new record to the database.

@@ -79,7 +73,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def get(self, query: str):
         """
         Get a record from the database.

@@ -95,7 +88,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def update(self, doc):
         """
         Update a record in the database.

@@ -109,7 +101,6 @@ class AbstractVectorDatabase(ABC):
         """

-    @abstractmethod
     def delete(self, message):
         """
         Delete a record from the database.

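With the decorators gone, AbstractVectorDatabase behaves as a plain base class: subclasses override only the methods they need and inherit the stubs for the rest. A minimal hypothetical subclass:

# Hypothetical subclass; nothing forces every method to be overridden
# now that @abstractmethod has been removed.
class InMemoryVectorDatabase(AbstractVectorDatabase):
    def __init__(self):
        self.docs = []

    def add(self, doc: str):
        self.docs.append(doc)

    def query(self, query: str):
        # Naive substring match stands in for a real vector search
        return [d for d in self.docs if query in d]
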
@@ -0,0 +1,10 @@
from swarms.models.popular_llms import OpenAIChat


class MistralAPILLM(OpenAIChat):
    def __init__(self, url):
        super().__init__()
        self.openai_proxy_url = url

    def __call__(self, task: str):
        return super().__call__(task)

@@ -119,7 +119,6 @@ class TogetherLLM(AbstractLLM):
             )
             out = response.json()

-            if "choices" in out and out["choices"]:
             content = (
                 out["choices"][0]
                 .get("message", {})

@@ -127,10 +126,8 @@ class TogetherLLM(AbstractLLM):
             )
             if self.streaming_enabled:
                 content = self.stream_response(content)

             return content
-            else:
-                print("No valid response in 'choices'")
-                return None

         except Exception as error:
             print(

@@ -19,16 +19,13 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
 )
 from swarms.prompts.worker_prompt import worker_tools_sop_promp
 from swarms.structs.conversation import Conversation
-from swarms.structs.schemas import Step
 from swarms.tokenizers.base_tokenizer import BaseTokenizer
-from swarms.tools.exec_tool import execute_tool_by_name
 from swarms.tools.tool import BaseTool
 from swarms.utils.code_interpreter import SubprocessCodeInterpreter
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.parse_code import extract_code_from_markdown
 from swarms.utils.pdf_to_text import pdf_to_text
 from swarms.utils.token_count_tiktoken import limit_tokens_from_string
-from swarms.utils.execution_sandbox import execute_code_in_sandbox


 # Utils
@@ -207,6 +204,10 @@ class Agent:
         evaluator: Optional[Callable] = None,
         output_json: bool = False,
         stopping_func: Optional[Callable] = None,
+        custom_loop_condition: Optional[Callable] = None,
+        sentiment_threshold: Optional[float] = None,
+        custom_exit_command: Optional[str] = "exit",
+        sentiment_analyzer: Optional[Callable] = None,
         *args,
         **kwargs,
     ):

@@ -262,6 +263,10 @@ class Agent:
         self.evaluator = evaluator
         self.output_json = output_json
         self.stopping_func = stopping_func
+        self.custom_loop_condition = custom_loop_condition
+        self.sentiment_threshold = sentiment_threshold
+        self.custom_exit_command = custom_exit_command
+        self.sentiment_analyzer = sentiment_analyzer

         # The max_loops will be set dynamically if the dynamic_loop
         if self.dynamic_loops:

@@ -559,190 +564,161 @@ class Agent:
     ):
         """
         Run the autonomous agent loop
-
-        Args:
-            task (str): The initial task to run
-
-        Agent:
-        1. Generate a response
-        2. Check stopping condition
-        3. If stopping condition is met, stop
-        4. If stopping condition is not met, generate a response
-        5. Repeat until stopping condition is met or max_loops is reached
-
         """
         try:
-            # Activate Autonomous agent message
             self.activate_autonomous_agent()

-            # response = task # or combined_prompt
-            history = self._history(self.user_name, task)
-
-            # If dashboard = True then print the dashboard
-            if self.dashboard:
-                self.print_dashboard(task)
+            if task:
+                self.short_memory.add(
+                    role=self.user_name, content=task
+                )

             loop_count = 0
             response = None
-            # While the max_loops is auto or the loop count is less than the max_loops
             while (
                 self.max_loops == "auto"
                 or loop_count < self.max_loops
+                # or self.custom_loop_condition()
             ):
-                # Loop count
                 loop_count += 1
                 self.loop_count_print(loop_count, self.max_loops)
                 print("\n")

-                # Adjust temperature, comment if no work
                 if self.dynamic_temperature_enabled:
-                    print(colored("Adjusting temperature...", "blue"))
                     self.dynamic_temperature()

-                # Preparing the prompt
-                task = self.agent_history_prompt(history=task)
+                task_prompt = (
+                    self.short_memory.return_history_as_string()
+                )

                 attempt = 0
-                while attempt < self.retry_attempts:
+                success = False
+                while attempt < self.retry_attempts and not success:
                     try:
-                        if img:
-                            response = self.llm(
-                                task,
-                                img,
-                                **kwargs,
-                            )
-                            print(response)
-                        else:
-                            response = self.llm(
-                                task,
-                                **kwargs,
-                            )
-                            print(response)
-
-                        if self.output_json:
-                            response = extract_code_from_markdown(
-                                response
-                            )
+                        response_args = (
+                            (task_prompt, *args)
+                            if img is None
+                            else (task_prompt, img, *args)
+                        )
+                        response = self.llm(*response_args, **kwargs)
+                        print(response)
+                        self.short_memory.add(
+                            role=self.agent_name, content=response
+                        )

-                        # Code interpreter
                         if self.code_interpreter:
-                            response = extract_code_from_markdown(
-                                response
-                            )
-                            # Execute the code in the sandbox
-                            response = execute_code_in_sandbox(
-                                response
-                            )
-                            response = task + response
+                            extracted_code = (
+                                extract_code_from_markdown(response)
+                            )
+                            task_prompt += extracted_code
                             response = self.llm(
-                                response, *args, **kwargs
+                                task_prompt, *args, **kwargs
                             )
-
-                        # Add the response to the history
-                        history.append(response)
-
-                        # Log each step
-                        step = Step(
-                            input=str(task),
-                            task_id=str(task_id),
-                            step_id=str(step_id),
-                            output=str(response),
-                            status="running",
-                        )
+                            self.short_memory.add(
+                                role=self.agent_name, content=response
+                            )

                         if self.evaluator:
                             evaluated_response = self.evaluator(
                                 response
                             )
-
-                            out = (
-                                f"Response: {response}\nEvaluated"
-                                f" Response: {evaluated_response}"
-                            )
-                            out = self.short_memory.add(
-                                "Evaluator", out
+                            print(
+                                "Evaluated Response:"
+                                f" {evaluated_response}"
+                            )
+                            self.short_memory.add(
+                                role=self.agent_name,
+                                content=evaluated_response,
                             )

-                        # Stopping logic for agents
-                        if self.stopping_token:
-                            # Check if the stopping token is in the response
-                            if self.stopping_token in response:
-                                break
-
-                        if self.stopping_condition:
-                            if self._check_stopping_condition(
-                                response
-                            ):
-                                break
-
-                        if self.stopping_func is not None:
-                            if self.stopping_func(response) is True:
-                                break
-
-                        # If the stopping condition is met then break
-                        self.step_cache.append(step)
-                        logging.info(f"Step: {step}")
-
-                        # If parser exists then parse the response
-                        if self.parser:
-                            response = self.parser(response)
-
-                        # If tools are enabled then execute the tools
-                        if self.tools:
-                            execute_tool_by_name(
-                                response,
-                                self.tools,
-                                self.stopping_condition,
-                            )
-
-                        # If interactive mode is enabled then print the response and get user input
-                        if self.interactive:
-                            print(f"AI: {response}")
-                            history.append(f"AI: {response}")
-                            response = input("You: ")
-                            history.append(f"Human: {response}")
-
-                        # If interactive mode is not enabled then print the response
-                        else:
-                            # print(f"AI: {response}")
-                            history.append(f"AI: {response}")
-                            # print(response)
-                        break
+                        # Sentiment analysis
+                        if self.sentiment_analyzer:
+                            sentiment = self.sentiment_analyzer(
+                                response
+                            )
+                            print(f"Sentiment: {sentiment}")
+
+                            if sentiment > self.sentiment_threshold:
+                                print(
+                                    f"Sentiment: {sentiment} is above"
+                                    " threshold:"
+                                    f" {self.sentiment_threshold}"
+                                )
+                            elif sentiment < self.sentiment_threshold:
+                                print(
+                                    f"Sentiment: {sentiment} is below"
+                                    " threshold:"
+                                    f" {self.sentiment_threshold}"
+                                )
+
+                            # print(f"Sentiment: {sentiment}")
+                            self.short_memory.add(
+                                role=self.agent_name,
+                                content=sentiment,
+                            )
+
+                        success = True  # Mark as successful to exit the retry loop
+
                     except Exception as e:
-                        logging.error(
-                            f"Error generating response: {e}"
+                        logger.error(
+                            f"Attempt {attempt+1}: Error generating"
+                            f" response: {e}"
                         )
                         attempt += 1
-                        time.sleep(self.retry_interval)
-                time.sleep(self.loop_interval)

-            # Add the history to the memory
-            self.short_memory.add(
-                role=self.agent_name, content=history
-            )
+                if not success:
+                    logger.error(
+                        "Failed to generate a valid response after"
+                        " retry attempts."
+                    )
+                    break  # Exit the loop if all retry attempts fail
+
+                # Check stopping conditions
+                if (
+                    self.stopping_token
+                    and self.stopping_token in response
+                ):
+                    break
+                elif (
+                    self.stopping_condition
+                    and self._check_stopping_condition(response)
+                ):
+                    break
+                elif self.stopping_func and self.stopping_func(
+                    response
+                ):
+                    break
+
+                if self.interactive:
+                    user_input = input("You: ")
+
+                    # User-defined exit command
+                    if (
+                        user_input.lower()
+                        == self.custom_exit_command.lower()
+                    ):
+                        print("Exiting as per user request.")
+                        break
+
+                    self.short_memory.add(
+                        role=self.user_name, content=user_input
+                    )
+
+                if self.loop_interval:
+                    logger.info(
+                        f"Sleeping for {self.loop_interval} seconds"
+                    )
+                    time.sleep(self.loop_interval)

-            # If autosave is enabled then save the state
             if self.autosave:
-                print(
-                    colored(
-                        (
-                            "Autosaving agent state to"
-                            f" {self.saved_state_path}"
-                        ),
-                        "green",
-                    )
-                )
+                logger.info("Autosaving agent state.")
                 self.save_state(self.saved_state_path)

-            # If return history is enabled then return the response and history
-            if self.return_history:
-                return response, history
-
             return response
         except Exception as error:
-            logger.error(f"Error running agent: {error}")
-            raise
+            print(f"Error running agent: {error}")
+            raise error

     def __call__(self, task: str, img: str = None, *args, **kwargs):
         """Call the agent

@@ -751,7 +727,11 @@ class Agent:
         task (str): _description_
         img (str, optional): _description_. Defaults to None.
         """
-        self.run(task, img, *args, **kwargs)
+        try:
+            self.run(task, img, *args, **kwargs)
+        except Exception as error:
+            logger.error(f"Error calling agent: {error}")
+            raise

     def agent_history_prompt(
         self,

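Combined with the new constructor arguments above, the reworked loop can be exercised as follows; the toy analyzer is a stand-in, since no sentiment model ships with this change:

# Sketch only: sentiment_analyzer is any callable mapping text to a
# float; this toy scorer is a stand-in, not part of the library.
from swarms import Agent, Anthropic

def toy_sentiment(text: str) -> float:
    text = text.lower()
    ups = sum(w in text for w in ("good", "great", "success"))
    downs = sum(w in text for w in ("bad", "error", "failure"))
    return 0.5 + 0.1 * (ups - downs)

agent = Agent(
    llm=Anthropic(),
    max_loops=3,
    interactive=True,
    retry_attempts=3,                  # bounds the new retry loop
    custom_exit_command="quit",        # typing "quit" ends the session
    sentiment_analyzer=toy_sentiment,  # scores each response
    sentiment_threshold=0.5,           # compared against that score
)
agent("Summarize what swarms are.")
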
@@ -1,12 +1,14 @@
-import yaml
-import json
 import asyncio
+import json
 from abc import ABC
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import Any, Callable, Dict, List, Optional, Sequence
-from swarms.utils.loguru_logger import logger
+
+import yaml
+
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
+from swarms.utils.loguru_logger import logger


 class AbstractSwarm(ABC):

@@ -34,7 +36,7 @@ class AbstractSwarm(ABC):
     assign_task: Assign a task to a agent
     get_all_tasks: Get all tasks
     get_finished_tasks: Get all finished tasks
-    get_pending_tasks: Get all pending tasks
+    get_pending_tasks: Get all penPding tasks
     pause_agent: Pause a agent
     resume_agent: Resume a agent
     stop_agent: Stop a agent
@@ -58,7 +60,8 @@ class AbstractSwarm(ABC):
     def __init__(
         self,
-        agents: List[Agent],
+        agents: List[Agent] = None,
+        models: List[Any] = None,
         max_loops: int = 200,
         callbacks: Optional[Sequence[callable]] = None,
         autosave: bool = False,

@@ -73,6 +76,7 @@ class AbstractSwarm(ABC):
     ):
         """Initialize the swarm with agents"""
         self.agents = agents
+        self.models = models
         self.max_loops = max_loops
         self.callbacks = callbacks
         self.autosave = autosave

@@ -82,6 +86,7 @@ class AbstractSwarm(ABC):
         self.stopping_function = stopping_function
         self.stopping_condition = stopping_condition
         self.stopping_condition_args = stopping_condition_args
+
         self.conversation = Conversation(
             time_enabled=True, *args, **kwargs
         )

@@ -117,6 +122,29 @@ class AbstractSwarm(ABC):
         if autosave:
             self.save_to_json(metadata_filename)

+        # Handle logging
+        if self.agents:
+            logger.info(
+                f"Swarm initialized with {len(self.agents)} agents"
+            )
+
+        # Handle stopping function
+        if stopping_function is not None:
+            if not callable(stopping_function):
+                raise TypeError("Stopping function must be callable.")
+            if stopping_condition_args is None:
+                stopping_condition_args = {}
+            self.stopping_condition_args = stopping_condition_args
+            self.stopping_condition = stopping_condition
+            self.stopping_function = stopping_function
+
+        # Handle stopping condition
+        if stopping_condition is not None:
+            if stopping_condition_args is None:
+                stopping_condition_args = {}
+            self.stopping_condition_args = stopping_condition_args
+            self.stopping_condition = stopping_condition
+
     # @abstractmethod
     def communicate(self):
         """Communicate with the swarm through the orchestrator, protocols, and the universal communication layer"""
@@ -124,6 +152,7 @@ class AbstractSwarm(ABC):
     # @abstractmethod
     def run(self):
         """Run the swarm"""
+        ...

     def __call__(
         self,

@@ -139,7 +168,11 @@ class AbstractSwarm(ABC):
         Returns:
             _type_: _description_
         """
-        return self.run(task, *args, **kwargs)
+        try:
+            return self.run(task, *args, **kwargs)
+        except Exception as error:
+            logger.error(f"Error running {self.__class__.__name__}")
+            raise error

     def step(self):
         """Step the swarm"""

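The new stopping-function handling can be exercised through any concrete subclass; MySwarm below is hypothetical:

# Hypothetical subclass; AbstractSwarm now validates stopping_function
# in __init__, so a non-callable fails fast with a TypeError.
class MySwarm(AbstractSwarm):
    def run(self, task, *args, **kwargs):
        return [agent.run(task) for agent in self.agents]

def stop_on_done(output) -> bool:
    return "<DONE>" in str(output)

# my_agent is any previously constructed Agent instance
swarm = MySwarm(agents=[my_agent], stopping_function=stop_on_done)

# MySwarm(agents=[my_agent], stopping_function="nope")
# -> TypeError: Stopping function must be callable.
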
@@ -19,7 +19,14 @@ class BaseWorkflow(BaseStructure):
     """

-    def __init__(self, *args, **kwargs):
+    def __init__(
+        self,
+        agents: List[Agent] = None,
+        task_pool: List[Task] = None,
+        models: List[Any] = None,
+        *args,
+        **kwargs,
+    ):
         super().__init__(*args, **kwargs)
         self.task_pool = []
         self.agent_pool = []

@@ -69,13 +76,14 @@ class BaseWorkflow(BaseStructure):
         """
         Abstract method to run the workflow.
         """
-        raise NotImplementedError("You must implement this method")
+        ...

     def __sequential_loop(self):
         """
         Abstract method for the sequential loop.
         """
         # raise NotImplementedError("You must implement this method")
+        ...

     def __log(self, message: str):
         """

@@ -1,7 +1,7 @@
-from swarms.tokenizers.anthropic_tokenizer import (
-    AnthropicTokenizer,
-    import_optional_dependency,
-)
+# from swarms.tokenizers.anthropic_tokenizer import (
+#     AnthropicTokenizer,
+#     import_optional_dependency,
+# )
 from swarms.tokenizers.base_tokenizer import BaseTokenizer
 from swarms.tokenizers.openai_tokenizers import OpenAITokenizer
 from swarms.tokenizers.r_tokenizers import (

@@ -10,12 +10,13 @@ from swarms.tokenizers.r_tokenizers import (
     Tokenizer,
 )

 __all__ = [
     "SentencePieceTokenizer",
     "HuggingFaceTokenizer",
     "Tokenizer",
     "BaseTokenizer",
     "OpenAITokenizer",
-    "import_optional_dependency",
-    "AnthropicTokenizer",
+    # "import_optional_dependency",
+    # "AnthropicTokenizer",
 ]

@@ -3,12 +3,13 @@ from typing import Any, Dict, List, Union

 from termcolor import cprint
 from transformers import PreTrainedModel, PreTrainedTokenizer
+from pydantic import BaseModel

 from swarms.tools.logits_processor import (
     NumberStoppingCriteria,
     OutputNumbersTokens,
     StringStoppingCriteria,
 )
+from swarms.models.base_llm import AbstractLLM

 GENERATION_MARKER = "|GENERATION|"

@@ -35,21 +36,25 @@ class Jsonformer:
     def __init__(
         self,
-        model: PreTrainedModel,
-        tokenizer: PreTrainedTokenizer,
-        json_schema: Dict[str, Any],
-        prompt: str,
+        model: PreTrainedModel = None,
+        tokenizer: PreTrainedTokenizer = None,
+        json_schema: Union[Dict[str, Any], BaseModel] = None,
+        schemas: List[Union[Dict[str, Any], BaseModel]] = [],
+        prompt: str = None,
         *,
         debug: bool = False,
         max_array_length: int = 10,
         max_number_tokens: int = 6,
         temperature: float = 1.0,
         max_string_token_length: int = 10,
+        llm: AbstractLLM = None,
     ):
         self.model = model
         self.tokenizer = tokenizer
         self.json_schema = json_schema
         self.prompt = prompt
+        self.llm = llm
+        self.schemas = schemas

         self.number_logit_processor = OutputNumbersTokens(
             self.tokenizer, self.prompt

@@ -88,11 +93,13 @@ class Jsonformer:
         Raises:
             ValueError: If a valid number cannot be generated after 3 iterations.
         """
+        if self.model:
             prompt = self.get_prompt()
             self.debug("[generate_number]", prompt, is_prompt=True)
             input_tokens = self.tokenizer.encode(
                 prompt, return_tensors="pt"
             ).to(self.model.device)
+
             response = self.model.generate(
                 input_tokens,
                 max_new_tokens=self.max_number_tokens,

@@ -117,13 +124,37 @@ class Jsonformer:
                 return float(response)
             except ValueError:
                 if iterations > 3:
-                    raise ValueError("Failed to generate a valid number")
+                    raise ValueError(
+                        "Failed to generate a valid number"
+                    )
+                return self.generate_number(
+                    temperature=self.temperature * 1.3,
+                    iterations=iterations + 1,
+                )
+        elif self.llm:
+            prompt = self.get_prompt()
+            self.debug("[generate_number]", prompt, is_prompt=True)
+            response = self.llm(prompt)
+            response = response[len(prompt) :]
+            response = response.strip().rstrip(".")
+            self.debug("[generate_number]", response)
+            try:
+                return float(response)
+            except ValueError:
+                if iterations > 3:
+                    raise ValueError(
+                        "Failed to generate a valid number"
+                    )

                 return self.generate_number(
                     temperature=self.temperature * 1.3,
                     iterations=iterations + 1,
                 )
+        elif self.llm and self.model:
+            raise ValueError("Both LLM and model cannot be None")

     def generate_boolean(self) -> bool:
         """
         Generates a boolean value based on the given prompt.

@@ -131,6 +162,7 @@ class Jsonformer:
         Returns:
             bool: The generated boolean value.
         """
+        if self.model:
             prompt = self.get_prompt()
             self.debug("[generate_boolean]", prompt, is_prompt=True)

@@ -145,8 +177,12 @@ class Jsonformer:
             # todo: this assumes that "true" and "false" are both tokenized to a single token
             # this is probably not true for all tokenizers
             # this can be fixed by looking at only the first token of both "true" and "false"
-        true_token_id = self.tokenizer.convert_tokens_to_ids("true")
-        false_token_id = self.tokenizer.convert_tokens_to_ids("false")
+            true_token_id = self.tokenizer.convert_tokens_to_ids(
+                "true"
+            )
+            false_token_id = self.tokenizer.convert_tokens_to_ids(
+                "false"
+            )

             result = logits[true_token_id] > logits[false_token_id]

@@ -154,7 +190,19 @@ class Jsonformer:
             return result.item()

+        elif self.llm:
+            prompt = self.get_prompt()
+            self.debug("[generate_boolean]", prompt, is_prompt=True)
+
+            output = self.llm(prompt)
+            return output if output == "true" or "false" else None
+
+        else:
+            raise ValueError("Both LLM and model cannot be None")
+
     def generate_string(self) -> str:
+        if self.model:
             prompt = self.get_prompt() + '"'
             self.debug("[generate_string]", prompt, is_prompt=True)
             input_tokens = self.tokenizer.encode(

@@ -179,7 +227,8 @@ class Jsonformer:
             if (
                 len(response[0]) >= len(input_tokens[0])
                 and (
-                    response[0][: len(input_tokens[0])] == input_tokens
+                    response[0][: len(input_tokens[0])]
+                    == input_tokens
                 ).all()
             ):
                 response = response[0][len(input_tokens[0]) :]

@@ -197,6 +246,35 @@ class Jsonformer:
             return response.split('"')[0].strip()

+        elif self.llm:
+            prompt = self.get_prompt() + '"'
+            self.debug("[generate_string]", prompt, is_prompt=True)
+            response = self.llm(prompt)
+
+            # Some models output the prompt as part of the response
+            # This removes the prompt from the response if it is present
+            if (
+                len(response[0]) >= len(input_tokens[0])
+                and (
+                    response[0][: len(input_tokens[0])]
+                    == input_tokens
+                ).all()
+            ):
+                response = response[0][len(input_tokens[0]) :]
+            if response.shape[0] == 1:
+                response = response[0]
+
+            self.debug("[generate_string]", "|" + response + "|")
+
+            if response.count('"') < 1:
+                return response
+
+            return response.split('"')[0].strip()
+
+        else:
+            raise ValueError("Both LLM and model cannot be None")
+
     def generate_object(
         self, properties: Dict[str, Any], obj: Dict[str, Any]
     ) -> Dict[str, Any]:

@@ -249,6 +327,7 @@ class Jsonformer:
     def generate_array(
         self, item_schema: Dict[str, Any], obj: Dict[str, Any]
     ) -> list:
+        if self.model:
             for _ in range(self.max_array_length):
                 # forces array to have at least one element
                 element = self.generate_value(item_schema, obj)

@@ -287,6 +366,34 @@ class Jsonformer:
             return obj

+        elif self.llm:
+            for _ in range(self.max_array_length):
+                # forces array to have at least one element
+                element = self.generate_value(item_schema, obj)
+                obj[-1] = element
+
+                obj.append(self.generation_marker)
+                input_prompt = self.get_prompt()
+                obj.pop()
+                output = self.llm(input_prompt)
+
+                found_comma = False
+                found_close_bracket = False
+
+                for token_id in output:
+                    decoded_token = str(token_id)
+                    if "," in decoded_token:
+                        found_comma = True
+                        break
+                    if "]" in decoded_token:
+                        found_close_bracket = True
+                        break
+
+                if found_close_bracket or not found_comma:
+                    break
+
+            return obj
+
     def get_prompt(self):
         template = """{prompt}\nOutput result in the following JSON schema format:\n{schema}\nResult: {progress}"""
         progress = json.dumps(self.value)

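Every new llm branch above shares one idiom: an LLM that echoes its prompt has the prompt sliced off before the remainder is parsed. In isolation, for the number case:

# The prompt-stripping idiom from the llm branches, in isolation.
prompt = "How old is the user?\nResult: "
response = prompt + " 42."         # an LLM that echoes its prompt
value = response[len(prompt):]     # drop the echoed prompt
value = value.strip().rstrip(".")  # trim whitespace and trailing dot
print(float(value))                # 42.0
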
@@ -0,0 +1,8 @@
from langchain.tools import (
    BaseTool,
    Tool,
    StructuredTool,
    tool,
)  # noqa F401

__all__ = ["BaseTool", "Tool", "StructuredTool", "tool"]
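
These are plain re-exports, so the usual langchain patterns carry over; assuming this module replaces swarms.tools.tool (the import path is an assumption):

# Assumes the re-exports land in swarms.tools.tool; the decorator is
# langchain's and behaves exactly as documented there.
from swarms.tools.tool import tool

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

print(add.run({"a": 2, "b": 3}))  # 5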

@@ -0,0 +1,50 @@
import os

from dotenv import load_dotenv
from pydantic import BaseModel, Field

from swarms import OpenAIChat, ToolAgent
from swarms.utils.json_utils import base_model_to_json

# Load the environment variables
load_dotenv()

# Initialize the OpenAIChat class
chat = OpenAIChat(
    api_key=os.getenv("OPENAI_API"),
)


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(
        ..., title="Whether the person is a student"
    )
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Convert the schema to a JSON string
tool_schema = base_model_to_json(Schema)

# Define the task to generate a person's information
task = (
    "Generate a person's information based on the following schema:"
)

# Create an instance of the ToolAgent class
agent = ToolAgent(
    name="dolly-function-agent",
    description="An agent to create child data",
    llm=chat,
    json_schema=tool_schema,
)

# Run the agent to generate the person's information
generated_data = agent(task)

# Print the generated data
print(f"Generated data: {generated_data}")