CLEANUP: swarms.agents, removed unused files

Former-commit-id: 07bcd22eef
grit/923f7c6f-0958-480b-8748-ea6bbf1c2084 2.1.9
Kye 1 year ago
parent bf9a747fa3
commit 4e2ce705aa

@ -13,7 +13,7 @@ black --experimental-string-processing swarms/
# Run ruff on the 'swarms' directory.
# Add any additional flags if needed according to your version of ruff.
ruff swarms/
x
#--unsafe_fix
# YAPF

@ -1,29 +0,0 @@
import os
import sys
from dotenv import load_dotenv
from swarms.models.revgptV4 import RevChatGPTModelv4
from swarms.models.revgptV1 import RevChatGPTModelv1
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_dir)
load_dotenv()
config = {
"model": os.getenv("REVGPT_MODEL"),
"plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
"disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
"PUID": os.getenv("REVGPT_PUID"),
"unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")],
}
# For v1 model
model = RevChatGPTModelv1(access_token=os.getenv("ACCESS_TOKEN"), **config)
# model = RevChatGPTModelv4(access_token=os.getenv("ACCESS_TOKEN"), **config)
# For v3 model
# model = RevChatGPTModel(access_token=os.getenv("OPENAI_API_KEY"), **config)
task = "Write a cli snake game"
response = model.run(task)
print(response)

@ -6,9 +6,9 @@ warnings.filterwarnings("ignore", category=UserWarning)
# disable tensorflow warnings
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from swarms.agents import *
from swarms.swarms import *
from swarms.structs import *
from swarms.models import *
from swarms.chunkers import *
from swarms.workers import *
from swarms.agents import * # noqa: E402, F403
from swarms.swarms import * # noqa: E402, F403
from swarms.structs import * # noqa: E402, F403
from swarms.models import * # noqa: E402, F403
from swarms.chunkers import * # noqa: E402, F403
from swarms.workers import * # noqa: E402, F403

@ -1,5 +1,4 @@
# from swarms.agents.omni_modal_agent import OmniModalAgent
from swarms.agents.hf_agents import HFAgent
from swarms.agents.message import Message
# from swarms.agents.stream_response import stream
@ -7,16 +6,12 @@ from swarms.agents.base import AbstractAgent
from swarms.agents.registry import Registry
# from swarms.agents.idea_to_image_agent import Idea2Image
from swarms.agents.simple_agent import SimpleAgent
"""Agent Infrastructure, models, memory, utils, tools"""
__all__ = [
# "OmniModalAgent",
"HFAgent",
"Message",
"AbstractAgent",
"Registry",
# "Idea2Image",
"SimpleAgent",
]

@ -1,433 +0,0 @@
from __future__ import annotations
import json
import time
from typing import Any, Callable, List, Optional
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ChatMessageHistory
from langchain.prompts.chat import (
BaseChatPromptTemplate,
)
from langchain.schema import (
BaseChatMessageHistory,
Document,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.vectorstore import VectorStoreRetriever
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
get_prompt,
)
from langchain_experimental.pydantic_v1 import BaseModel, ValidationError
# PROMPT
FINISH_NAME = "finish"
# This class has a metaclass conflict: both `BaseChatPromptTemplate` and `BaseModel`
# define a metaclass to use, and the two metaclasses attempt to define
# the same functions but in mutually-incompatible ways.
# It isn't clear how to resolve this, and this code predates mypy
# beginning to perform that check.
#
# Mypy errors:
# ```
# Definition of "__private_attributes__" in base class "BaseModel" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__repr_name__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__pretty__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__repr_str__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__rich_repr__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Metaclass conflict: the metaclass of a derived class must be
# a (non-strict) subclass of the metaclasses of all its bases [misc]
# ```
#
# TODO: look into refactoring this class in a way that avoids the mypy type errors
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel): # type: ignore[misc]
"""Prompt for AutoGPT."""
ai_name: str
ai_role: str
tools: List[BaseTool]
token_counter: Callable[[str], int]
send_token_limit: int = 4196
def construct_full_prompt(self, goals: List[str]) -> str:
prompt_start = (
"Your decisions must always be made independently "
"without seeking user assistance.\n"
"Play to your strengths as an LLM and pursue simple "
"strategies with no legal complications.\n"
"If you have completed all your tasks, make sure to "
'use the "finish" command.'
)
# Construct full prompt
full_prompt = (
f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
)
for i, goal in enumerate(goals):
full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{get_prompt(self.tools)}"
return full_prompt
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
time_prompt = SystemMessage(
content=f"The current time and date is {time.strftime('%c')}"
)
used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
time_prompt.content
)
memory: VectorStoreRetriever = kwargs["memory"]
previous_messages = kwargs["messages"]
relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
relevant_memory = [d.page_content for d in relevant_docs]
relevant_memory_tokens = sum(
[self.token_counter(doc) for doc in relevant_memory]
)
while used_tokens + relevant_memory_tokens > 2500:
relevant_memory = relevant_memory[:-1]
relevant_memory_tokens = sum(
[self.token_counter(doc) for doc in relevant_memory]
)
content_format = (
f"This reminds you of these events from your past:\n{relevant_memory}\n\n"
)
memory_message = SystemMessage(content=content_format)
used_tokens += self.token_counter(memory_message.content)
historical_messages: List[BaseMessage] = []
for message in previous_messages[-10:][::-1]:
message_tokens = self.token_counter(message.content)
if used_tokens + message_tokens > self.send_token_limit - 1000:
break
historical_messages = [message] + historical_messages
used_tokens += message_tokens
input_message = HumanMessage(content=kwargs["user_input"])
messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
messages += historical_messages
messages.append(input_message)
return messages
class PromptGenerator:
"""A class for generating custom prompt strings.
Does this based on constraints, commands, resources, and performance evaluations.
"""
def __init__(self) -> None:
"""Initialize the PromptGenerator object.
Starts with empty lists of constraints, commands, resources,
and performance evaluations.
"""
self.constraints: List[str] = []
self.commands: List[BaseTool] = []
self.resources: List[str] = []
self.performance_evaluation: List[str] = []
self.response_format = {
"thoughts": {
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user",
},
"command": {"name": "command name", "args": {"arg name": "value"}},
}
def add_constraint(self, constraint: str) -> None:
"""
Add a constraint to the constraints list.
Args:
constraint (str): The constraint to be added.
"""
self.constraints.append(constraint)
def add_tool(self, tool: BaseTool) -> None:
self.commands.append(tool)
def _generate_command_string(self, tool: BaseTool) -> str:
output = f"{tool.name}: {tool.description}"
output += f", args json schema: {json.dumps(tool.args)}"
return output
def add_resource(self, resource: str) -> None:
"""
Add a resource to the resources list.
Args:
resource (str): The resource to be added.
"""
self.resources.append(resource)
def add_performance_evaluation(self, evaluation: str) -> None:
"""
Add a performance evaluation item to the performance_evaluation list.
Args:
evaluation (str): The evaluation item to be added.
"""
self.performance_evaluation.append(evaluation)
def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
"""
Generate a numbered list from given items based on the item_type.
Args:
items (list): A list of items to be numbered.
item_type (str, optional): The type of items in the list.
Defaults to 'list'.
Returns:
str: The formatted numbered list.
"""
if item_type == "command":
command_strings = [
f"{i + 1}. {self._generate_command_string(item)}"
for i, item in enumerate(items)
]
finish_description = (
"use this to signal that you have finished all your objectives"
)
finish_args = (
'"response": "final response to let '
'people know you have finished your objectives"'
)
finish_string = (
f"{len(items) + 1}. {FINISH_NAME}: "
f"{finish_description}, args: {finish_args}"
)
return "\n".join(command_strings + [finish_string])
else:
return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
def generate_prompt_string(self) -> str:
"""Generate a prompt string.
Returns:
str: The generated prompt string.
"""
formatted_response_format = json.dumps(self.response_format, indent=4)
prompt_string = (
f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
"Commands:\n"
f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
"Performance Evaluation:\n"
f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
"You should only respond in JSON format as described below "
f"\nResponse Format: \n{formatted_response_format} "
"\nEnsure the response can be parsed by Python json.loads"
)
return prompt_string
def get_prompt(tools: List[BaseTool]) -> str:
"""Generates a prompt string.
It includes various constraints, commands, resources, and performance evaluations.
Returns:
str: The generated prompt string.
"""
# Initialize the PromptGenerator object
prompt_generator = PromptGenerator()
# Add constraints to the PromptGenerator object
prompt_generator.add_constraint(
"~16000 word limit for short term memory. "
"Your short term memory is short, "
"so immediately save important information to files."
)
prompt_generator.add_constraint(
"If you are unsure how you previously did something "
"or want to recall past events, "
"thinking about similar events will help you remember."
)
prompt_generator.add_constraint("No user assistance")
prompt_generator.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"'
)
# Add commands to the PromptGenerator object
for tool in tools:
prompt_generator.add_tool(tool)
# Add resources to the PromptGenerator object
prompt_generator.add_resource(
"Internet access for searches and information gathering."
)
prompt_generator.add_resource("Long Term memory management.")
prompt_generator.add_resource(
"GPT-3.5 powered Agents for delegation of simple tasks."
)
prompt_generator.add_resource("File output.")
# Add performance evaluations to the PromptGenerator object
prompt_generator.add_performance_evaluation(
"Continuously review and analyze your actions "
"to ensure you are performing to the best of your abilities."
)
prompt_generator.add_performance_evaluation(
"Constructively self-criticize your big-picture behavior constantly."
)
prompt_generator.add_performance_evaluation(
"Reflect on past decisions and strategies to refine your approach."
)
prompt_generator.add_performance_evaluation(
"Every command has a cost, so be smart and efficient. "
"Aim to complete tasks in the least number of steps."
)
# Generate the prompt string
prompt_string = prompt_generator.generate_prompt_string()
return prompt_string
class AutoGPT:
"""
AutoAgent:
Args:
"""
def __init__(
self,
ai_name: str,
memory: VectorStoreRetriever,
chain: LLMChain,
output_parser: BaseAutoGPTOutputParser,
tools: List[BaseTool],
feedback_tool: Optional[HumanInputRun] = None,
chat_history_memory: Optional[BaseChatMessageHistory] = None,
):
self.ai_name = ai_name
self.memory = memory
self.next_action_count = 0
self.chain = chain
self.output_parser = output_parser
self.tools = tools
self.feedback_tool = feedback_tool
self.chat_history_memory = chat_history_memory or ChatMessageHistory()
@classmethod
def from_llm_and_tools(
cls,
ai_name: str,
ai_role: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
human_in_the_loop: bool = False,
output_parser: Optional[BaseAutoGPTOutputParser] = None,
chat_history_memory: Optional[BaseChatMessageHistory] = None,
) -> AutoGPT:
prompt = AutoGPTPrompt(
ai_name=ai_name,
ai_role=ai_role,
tools=tools,
input_variables=["memory", "messages", "goals", "user_input"],
token_counter=llm.get_num_tokens,
)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(
ai_name,
memory,
chain,
output_parser or AutoGPTOutputParser(),
tools,
feedback_tool=human_feedback_tool,
chat_history_memory=chat_history_memory,
)
def run(self, goals: List[str]) -> str:
user_input = (
"Determine which next command to use, "
"and respond using the format specified above:"
)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
# Send message to AI, get response
assistant_reply = self.chain.run(
goals=goals,
messages=self.chat_history_memory.messages,
memory=self.memory,
user_input=user_input,
)
# Print Assistant thoughts
print(assistant_reply)
self.chat_history_memory.add_message(HumanMessage(content=user_input))
self.chat_history_memory.add_message(AIMessage(content=assistant_reply))
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
if action.name == FINISH_NAME:
return action.args["response"]
if action.name in tools:
tool = tools[action.name]
try:
observation = tool.run(action.args)
except ValidationError as e:
observation = (
f"Validation Error in args: {str(e)}, args: {action.args}"
)
except Exception as e:
observation = (
f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
)
result = f"Command {tool.name} returned: {observation}"
elif action.name == "ERROR":
result = f"Error: {action.args}. "
else:
result = (
f"Unknown command '{action.name}'. "
"Please refer to the 'COMMANDS' list for available "
"commands and only respond in the specified JSON format."
)
memory_to_add = f"Assistant Reply: {assistant_reply} \nResult: {result} "
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:
print("EXITING")
return "EXITING"
memory_to_add += feedback
self.memory.add_documents([Document(page_content=memory_to_add)])
self.chat_history_memory.add_message(SystemMessage(content=result))
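
The removed autogpt module above budgets its prompt in AutoGPTPrompt.format_messages: the last (least relevant) retrieved memories are dropped until they fit within roughly 2,500 tokens alongside the base prompt, and then only as many recent messages as fit under send_token_limit minus a 1,000-token reserve are prepended. The sketch below is a minimal, self-contained illustration of that budgeting logic only; the whitespace word count, the sample inputs, and the helper name budget_messages are illustrative stand-ins, not part of the removed code.

# Minimal sketch of the token-budgeting logic from AutoGPTPrompt.format_messages.
from typing import List, Tuple

SEND_TOKEN_LIMIT = 4196   # default from the removed class
MEMORY_BUDGET = 2500      # cap applied to retrieved memories plus the base prompt
REPLY_RESERVE = 1000      # head-room left for the model's answer


def token_counter(text: str) -> int:
    # Stand-in: count whitespace-separated words instead of real tokens.
    return len(text.split())


def budget_messages(
    base_tokens: int, memories: List[str], history: List[str]
) -> Tuple[List[str], List[str]]:
    used = base_tokens
    memories = list(memories)
    # Drop the last (least relevant) memories until they fit the budget.
    while memories and used + sum(map(token_counter, memories)) > MEMORY_BUDGET:
        memories = memories[:-1]
    used += sum(map(token_counter, memories))
    # Walk recent history newest-first, keeping messages while room remains.
    kept: List[str] = []
    for message in history[-10:][::-1]:
        cost = token_counter(message)
        if used + cost > SEND_TOKEN_LIMIT - REPLY_RESERVE:
            break
        kept.insert(0, message)
        used += cost
    return memories, kept


mems, msgs = budget_messages(80, ["memory entry " * 300] * 5, ["hello world"] * 12)
print(len(mems), len(msgs))  # how many memories and messages survived the budget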

@ -1,276 +0,0 @@
import logging
import os
import time
import openai_model
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
class OpenAI:
def __init__(
self,
api_key,
strategy="cot",
evaluation_strategy="value",
api_base="",
api_model="",
):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai_model.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == "" or api_base is None:
api_base = os.environ.get(
"OPENAI_API_BASE", ""
) # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai_model.api_base = api_base
print(f"Using custom api_base {api_base}")
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f"Using api_model {self.api_model}")
self.use_chat_api = "gpt" in self.api_model
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def run(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [{"role": "user", "content": prompt}]
response = openai_model.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai_model.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", "a") as log_file:
log_file.write(
"\n" + "-----------" + "\n" + "Prompt : " + prompt + "\n"
)
return response
except openai_model.error.RateLimitError as e:
sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(
f"{str(e)}, sleep for {sleep_duration}s, set it by env"
" OPENAI_RATE_TIMEOUT"
)
time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice["message"]["content"]
else:
text = choice.text.strip()
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.run(prompt, 400, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
# print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.run(prompt, 300, 0.5, k)
thoughts = [
self.openai_choice2text_handler(choice) for choice in response.choices
]
return thoughts
def generate_thoughts(self, state, k, initial_prompt, rejected_solutions=None):
if isinstance(state, str):
pass
else:
"\n".join(state)
print("New state generating thought:", state, "\n\n")
prompt = f"""
Accomplish the task below by decomposing it as many very explicit subtasks as possible, be very explicit and thorough denoted by
a search process, highlighted by markers 1,..., 3 as first operations guiding subtree exploration for the OBJECTIVE,
focus on the third subtree exploration. Produce prospective search steps (e.g., the subtree exploration 5. 11 + 1)
and evaluates potential subsequent steps to either progress
towards a solution or retrace to another viable subtree then be very thorough
and think atomically then provide solutions for those subtasks,
then return the definitive end result and then summarize it
########## OBJECTIVE
{initial_prompt}
###################
"""
thoughts = self.generate_text(prompt, k)
# print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state, rejected_solutions=None):
try:
if isinstance(state, list):
state_text = "\n".join(state)
else:
state_text = state
prompt = f"""
Generate a series of solutions to comply with the user's instructions,
you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time,
while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
answer = self.generate_text(prompt, 1)
print(f"Generated Solution Summary {answer}")
return answer
except Exception as e:
logger.error(f"Error in generate_solutions: {e}")
return None
def evaluate_states(self, states, initial_prompt):
if not states:
return {}
if self.evaluation_strategy == "value":
state_values = {}
for state in states:
if isinstance(state, str):
state_text = state
else:
state_text = "\n".join(state)
print(
"We receive a state of type",
type(state),
"For state: ",
state,
"\n\n",
)
prompt = f""" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\n
Past solutions:\n\n
{state_text}\n
If the solution is not making fast progress in achieving the goal, give it a lower score.
Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\n, DO NOT RETURN ANYTHING ELSE
"""
response = self.run(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
# print(f'state: {value_text}')
value = float(value_text)
print(f"Evaluated Thought Value: {value}")
except ValueError:
value = 0
state_values[state] = value
return state_values
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class AoTAgent:
def __init__(
self,
num_thoughts: int = None,
max_steps: int = None,
value_threshold: float = None,
pruning_threshold=0.5,
backtracking_threshold=0.4,
initial_prompt=None,
openai_api_key: str = None,
model=None,
):
self.num_thoughts = num_thoughts
self.max_steps = max_steps
self.value_threshold = value_threshold
self.backtracking_threshold = backtracking_threshold
self.pruning_threshold = pruning_threshold
self.initial_prompt = initial_prompt
self.output = []
self.openai_api_key = openai_api_key
self.model = model
self.model = self.model or OpenAI(api_key=self.openai_api_key)
def solve(self):
try:
self.dfs(self.initial_prompt, 1)
if not self.output:
logger.error("No valid thoughts were generated during DFS")
return None
best_state, _ = max(self.output, key=lambda x: x[1])
solution = self.model.generate_solution(self.initial_prompt, best_state)
print(f"Solution is {solution}")
return solution if solution else best_state
except Exception as error:
logger.error(f"Error in tot_dfs: {error}")
raise error
def dfs(self, state, step):
if step > self.max_steps:
thought, value = self.evaluate_thought(state)
self.output.append((thought, value))
return
thoughts = self.generate_and_filter_thoughts(state)
for next_state in thoughts:
state_value = self.evaluated_thoughts[next_state]
if state_value > self.value_threshold:
child = (
(state, next_state)
if isinstance(state, str)
else (*state, next_state)
)
self.dfs(child, step + 1)
# backtracking
best_value = max([value for _, value in self.output])
if best_value < self.backtracking_threshold:
self.output.pop()
continue
def generate_and_filter_thoughts(self, state):
thoughts = self.model.generate_thoughts(
state, self.num_thoughts, self.initial_prompt
)
self.evaluated_thoughts = self.model.evaluate_states(
thoughts, self.initial_prompt
)
filtered_thoughts = [
thought
for thought in thoughts
if self.evaluated_thoughts[thought] >= self.pruning_threshold
]
print(f"filtered_thoughts: {filtered_thoughts}")
return filtered_thoughts
def evaluate_thought(self, state):
thought = self.model.generate_thoughts(state, 1, self.initial_prompt)
value = self.model.evaluate_states([state], self.initial_prompt)[state]
print(f"Evaluated thought: {value}")
return thought, value
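
The removed AoTAgent above runs a depth-first search over LLM-generated thoughts: candidates scoring below pruning_threshold are discarded, thoughts scoring above value_threshold are expanded, and leaves reached at max_steps are recorded with their values. The sketch below illustrates that control flow only, with deterministic stub generate and evaluate functions in place of the OpenAI-backed methods; backtracking is omitted for brevity and every name here is a stand-in.

# Sketch of the depth-first search with pruning from the removed AoTAgent.
from typing import List, Tuple

PRUNING_THRESHOLD = 0.5   # discard thoughts scored below this
VALUE_THRESHOLD = 0.6     # only expand thoughts scored above this
MAX_STEPS = 3
NUM_THOUGHTS = 2

output: List[Tuple[str, float]] = []


def generate(state: str, k: int) -> List[str]:
    return [f"{state} -> t{i}" for i in range(k)]


def evaluate(state: str) -> float:
    # Stand-in scorer: deeper chains score higher; branches ending in "t1"
    # are scored low so the pruning path is exercised.
    if state.endswith("t1"):
        return 0.3
    return min(1.0, 0.45 + 0.2 * state.count("->"))


def dfs(state: str, step: int) -> None:
    if step > MAX_STEPS:
        output.append((state, evaluate(state)))
        return
    for thought in generate(state, NUM_THOUGHTS):
        value = evaluate(thought)
        if value < PRUNING_THRESHOLD:
            continue          # pruned: not promising enough to expand
        if value > VALUE_THRESHOLD:
            dfs(thought, step + 1)


dfs("solve 24 from 4 4 6 8", 1)
best_state, best_value = max(output, key=lambda pair: pair[1])
print(best_state, best_value)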

@ -1,4 +0,0 @@
"""
Companion agents converse with the user about the agent they want to create, then build that agent with the desired attributes, traits, tools, and configuration.
"""

@ -1,599 +0,0 @@
import importlib.util
import json
import os
from dataclasses import dataclass
from typing import Dict
from huggingface_hub import hf_hub_download, list_spaces
from transformers.tools.base import (
TASK_MAPPING,
TOOL_CONFIG_FILE,
Tool,
load_tool,
supports_remote,
)
from transformers.tools.prompts import CHAT_MESSAGE_PROMPT, download_prompt
from transformers.tools.python_interpreter import evaluate
from transformers.utils import is_offline_mode, is_openai_available, logging
# utils
logger = logging.get_logger(__name__)
if is_openai_available():
import openai
else:
StoppingCriteria = object
_tools_are_initialized = False
BASE_PYTHON_TOOLS = {
"print": print,
"range": range,
"float": float,
"int": int,
"bool": bool,
"str": str,
}
@dataclass
class PreTool:
task: str
description: str
repo_id: str
HUGGINGFACE_DEFAULT_TOOLS = {}
HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [
"image-transformation",
"text-download",
"text-to-image",
"text-to-video",
]
def get_remote_tools(organization="huggingface-tools"):
if is_offline_mode():
logger.info("You are in offline mode, so remote tools are not available.")
return {}
spaces = list_spaces(author=organization)
tools = {}
for space_info in spaces:
repo_id = space_info.id
resolved_config_file = hf_hub_download(
repo_id, TOOL_CONFIG_FILE, repo_type="space"
)
with open(resolved_config_file, encoding="utf-8") as reader:
config = json.load(reader)
task = repo_id.split("/")[-1]
tools[config["name"]] = PreTool(
task=task, description=config["description"], repo_id=repo_id
)
return tools
def _setup_default_tools():
global HUGGINGFACE_DEFAULT_TOOLS
global _tools_are_initialized
if _tools_are_initialized:
return
main_module = importlib.import_module("transformers")
tools_module = main_module.tools
remote_tools = get_remote_tools()
for task_name, tool_class_name in TASK_MAPPING.items():
tool_class = getattr(tools_module, tool_class_name)
description = tool_class.description
HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(
task=task_name, description=description, repo_id=None
)
if not is_offline_mode():
for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB:
found = False
for tool_name, tool in remote_tools.items():
if tool.task == task_name:
HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool
found = True
break
if not found:
raise ValueError(f"{task_name} is not implemented on the Hub.")
_tools_are_initialized = True
def resolve_tools(code, toolbox, remote=False, cached_tools=None):
if cached_tools is None:
resolved_tools = BASE_PYTHON_TOOLS.copy()
else:
resolved_tools = cached_tools
for name, tool in toolbox.items():
if name not in code or name in resolved_tools:
continue
if isinstance(tool, Tool):
resolved_tools[name] = tool
else:
task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id
_remote = remote and supports_remote(task_or_repo_id)
resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote)
return resolved_tools
def get_tool_creation_code(code, toolbox, remote=False):
code_lines = ["from transformers import load_tool", ""]
for name, tool in toolbox.items():
if name not in code or isinstance(tool, Tool):
continue
task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id
line = f'{name} = load_tool("{task_or_repo_id}"'
if remote:
line += ", remote=True"
line += ")"
code_lines.append(line)
return "\n".join(code_lines) + "\n"
def clean_code_for_chat(result):
lines = result.split("\n")
idx = 0
while idx < len(lines) and not lines[idx].lstrip().startswith("```"):
idx += 1
explanation = "\n".join(lines[:idx]).strip()
if idx == len(lines):
return explanation, None
idx += 1
start_idx = idx
while not lines[idx].lstrip().startswith("```"):
idx += 1
code = "\n".join(lines[start_idx:idx]).strip()
return explanation, code
def clean_code_for_run(result):
result = f"I will use the following {result}"
explanation, code = result.split("Answer:")
explanation = explanation.strip()
code = code.strip()
code_lines = code.split("\n")
if code_lines[0] in ["```", "```py", "```python"]:
code_lines = code_lines[1:]
if code_lines[-1] == "```":
code_lines = code_lines[:-1]
code = "\n".join(code_lines)
return explanation, code
class Agent:
"""
Base class for all agents which contains the main API methods.
Args:
chat_prompt_template (`str`, *optional*):
Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
`chat_prompt_template.txt` in this repo in this case.
run_prompt_template (`str`, *optional*):
Pass along your own prompt if you want to override the default template for the `run` method. Can be the
actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
`run_prompt_template.txt` in this repo in this case.
additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
one of the default tools, that default tool will be overridden.
"""
def __init__(
self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None
):
_setup_default_tools()
agent_name = self.__class__.__name__
self.chat_prompt_template = download_prompt(
chat_prompt_template, agent_name, mode="chat"
)
self.run_prompt_template = download_prompt(
run_prompt_template, agent_name, mode="run"
)
self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy()
self.log = print
if additional_tools is not None:
if isinstance(additional_tools, (list, tuple)):
additional_tools = {t.name: t for t in additional_tools}
elif not isinstance(additional_tools, dict):
additional_tools = {additional_tools.name: additional_tools}
replacements = {
name: tool
for name, tool in additional_tools.items()
if name in HUGGINGFACE_DEFAULT_TOOLS
}
self._toolbox.update(additional_tools)
if len(replacements) > 1:
names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()])
logger.warning(
"The following tools have been replaced by the ones provided in"
f" `additional_tools`:\n{names}."
)
elif len(replacements) == 1:
name = list(replacements.keys())[0]
logger.warning(
f"{name} has been replaced by {replacements[name]} as provided in"
" `additional_tools`."
)
self.prepare_for_new_chat()
@property
def toolbox(self) -> Dict[str, Tool]:
"""Get all tool currently available to the agent"""
return self._toolbox
def format_prompt(self, task, chat_mode=False):
description = "\n".join(
[f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]
)
if chat_mode:
if self.chat_history is None:
prompt = self.chat_prompt_template.replace("<<all_tools>>", description)
else:
prompt = self.chat_history
prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task)
else:
prompt = self.run_prompt_template.replace("<<all_tools>>", description)
prompt = prompt.replace("<<prompt>>", task)
return prompt
def set_stream(self, streamer):
"""
Set the function used to stream results (which is `print` by default).
Args:
streamer (`callable`): The function to call when streaming results from the LLM.
"""
self.log = streamer
def chat(self, task, *, return_code=False, remote=False, **kwargs):
"""
Sends a new request to the agent in a chat. Will use the previous ones in its history.
Args:
task (`str`): The task to perform
return_code (`bool`, *optional*, defaults to `False`):
Whether to just return code and not evaluate it.
remote (`bool`, *optional*, defaults to `False`):
Whether or not to use remote tools (inference endpoints) instead of local ones.
kwargs (additional keyword arguments, *optional*):
Any keyword argument to send to the agent when evaluating the code.
Example:
```py
from transformers import HfAgent
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
agent.chat("Draw me a picture of rivers and lakes")
agent.chat("Transform the picture so that there is a rock in there")
```
"""
prompt = self.format_prompt(task, chat_mode=True)
result = self.generate_one(prompt, stop=["Human:", "====="])
self.chat_history = prompt + result.strip() + "\n"
explanation, code = clean_code_for_chat(result)
self.log(f"==Explanation from the agent==\n{explanation}")
if code is not None:
self.log(f"\n\n==Code generated by the agent==\n{code}")
if not return_code:
self.log("\n\n==Result==")
self.cached_tools = resolve_tools(
code, self.toolbox, remote=remote, cached_tools=self.cached_tools
)
self.chat_state.update(kwargs)
return evaluate(
code, self.cached_tools, self.chat_state, chat_mode=True
)
else:
tool_code = get_tool_creation_code(code, self.toolbox, remote=remote)
return f"{tool_code}\n{code}"
def prepare_for_new_chat(self):
"""
Clears the history of prior calls to [`~Agent.chat`].
"""
self.chat_history = None
self.chat_state = {}
self.cached_tools = None
def run(self, task, *, return_code=False, remote=False, **kwargs):
"""
Sends a request to the agent.
Args:
task (`str`): The task to perform
return_code (`bool`, *optional*, defaults to `False`):
Whether to just return code and not evaluate it.
remote (`bool`, *optional*, defaults to `False`):
Whether or not to use remote tools (inference endpoints) instead of local ones.
kwargs (additional keyword arguments, *optional*):
Any keyword argument to send to the agent when evaluating the code.
Example:
```py
from transformers import HfAgent
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
agent.run("Draw me a picture of rivers and lakes")
```
"""
prompt = self.format_prompt(task)
result = self.generate_one(prompt, stop=["Task:"])
explanation, code = clean_code_for_run(result)
self.log(f"==Explanation from the agent==\n{explanation}")
self.log(f"\n\n==Code generated by the agent==\n{code}")
if not return_code:
self.log("\n\n==Result==")
self.cached_tools = resolve_tools(
code, self.toolbox, remote=remote, cached_tools=self.cached_tools
)
return evaluate(code, self.cached_tools, state=kwargs.copy())
else:
tool_code = get_tool_creation_code(code, self.toolbox, remote=remote)
return f"{tool_code}\n{code}"
def generate_one(self, prompt, stop):
# This is the method to implement in your custom agent.
raise NotImplementedError
def generate_many(self, prompts, stop):
# Override if you have a way to do batch generation faster than one by one
return [self.generate_one(prompt, stop) for prompt in prompts]
class HFAgent(Agent):
"""
Agent that uses the openai API to generate code.
<Tip warning={true}>
The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like
`"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version.
</Tip>
Args:
model (`str`, *optional*, defaults to `"text-davinci-003"`):
The name of the OpenAI model to use.
api_key (`str`, *optional*):
The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`.
chat_prompt_template (`str`, *optional*):
Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
`chat_prompt_template.txt` in this repo in this case.
run_prompt_template (`str`, *optional*):
Pass along your own prompt if you want to override the default template for the `run` method. Can be the
actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
`run_prompt_template.txt` in this repo in this case.
additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
one of the default tools, that default tool will be overridden.
Example:
```py
from swarms.agents.hf_agents import HFAgent
agent = HFAgent(model="text-davinci-003", api_key=xxx)
agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
```
"""
def __init__(
self,
model="text-davinci-003",
api_key=None,
chat_prompt_template=None,
run_prompt_template=None,
additional_tools=None,
):
if not is_openai_available():
raise ImportError(
"Using `OpenAiAgent` requires `openai`: `pip install openai`."
)
if api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", None)
if api_key is None:
raise ValueError(
"You need an openai key to use `OpenAIAgent`. You can get one here: Get"
" one here https://openai.com/api/`. If you have one, set it in your"
" env with `os.environ['OPENAI_API_KEY'] = xxx."
)
else:
openai.api_key = api_key
self.model = model
super().__init__(
chat_prompt_template=chat_prompt_template,
run_prompt_template=run_prompt_template,
additional_tools=additional_tools,
)
def generate_many(self, prompts, stop):
if "gpt" in self.model:
return [self._chat_generate(prompt, stop) for prompt in prompts]
else:
return self._completion_generate(prompts, stop)
def generate_one(self, prompt, stop):
if "gpt" in self.model:
return self._chat_generate(prompt, stop)
else:
return self._completion_generate([prompt], stop)[0]
def _chat_generate(self, prompt, stop):
result = openai.ChatCompletion.create(
model=self.model,
messages=[{"role": "user", "content": prompt}],
temperature=0,
stop=stop,
)
return result["choices"][0]["message"]["content"]
def _completion_generate(self, prompts, stop):
result = openai.Completion.create(
model=self.model,
prompt=prompts,
temperature=0,
stop=stop,
max_tokens=200,
)
return [answer["text"] for answer in result["choices"]]
class AzureOpenAI(Agent):
"""
Agent that uses Azure OpenAI to generate code. See the [official
documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) to learn how to deploy an openAI
model on Azure
<Tip warning={true}>
The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like
`"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version.
</Tip>
Args:
deployment_id (`str`):
The name of the deployed Azure openAI model to use.
api_key (`str`, *optional*):
The API key to use. If unset, will look for the environment variable `"AZURE_OPENAI_API_KEY"`.
resource_name (`str`, *optional*):
The name of your Azure OpenAI Resource. If unset, will look for the environment variable
`"AZURE_OPENAI_RESOURCE_NAME"`.
api_version (`str`, *optional*, default to `"2022-12-01"`):
The API version to use for this agent.
is_chat_model (`bool`, *optional*):
Whether you are using a completion model or a chat model (see note above, chat models won't be as
efficient). Defaults to whether `gpt` appears in the `deployment_id`.
chat_prompt_template (`str`, *optional*):
Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
`chat_prompt_template.txt` in this repo in this case.
run_prompt_template (`str`, *optional*):
Pass along your own prompt if you want to override the default template for the `run` method. Can be the
actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
`run_prompt_template.txt` in this repo in this case.
additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
one of the default tools, that default tool will be overridden.
Example:
```py
from transformers import AzureOpenAiAgent
agent = AzureOpenAiAgent(deployment_id="Davinci-003", api_key=xxx, resource_name=yyy)
agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
```
"""
def __init__(
self,
deployment_id,
api_key=None,
resource_name=None,
api_version="2022-12-01",
is_chat_model=None,
chat_prompt_template=None,
run_prompt_template=None,
additional_tools=None,
):
if not is_openai_available():
raise ImportError(
"Using `OpenAiAgent` requires `openai`: `pip install openai`."
)
self.deployment_id = deployment_id
openai.api_type = "azure"
if api_key is None:
api_key = os.environ.get("AZURE_OPENAI_API_KEY", None)
if api_key is None:
raise ValueError(
"You need an Azure openAI key to use `AzureOpenAIAgent`. If you have"
" one, set it in your env with `os.environ['AZURE_OPENAI_API_KEY'] ="
" xxx."
)
else:
openai.api_key = api_key
if resource_name is None:
resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None)
if resource_name is None:
raise ValueError(
"You need a resource_name to use `AzureOpenAIAgent`. If you have one,"
" set it in your env with `os.environ['AZURE_OPENAI_RESOURCE_NAME'] ="
" xxx."
)
else:
openai.api_base = f"https://{resource_name}.openai.azure.com"
openai.api_version = api_version
if is_chat_model is None:
is_chat_model = "gpt" in deployment_id.lower()
self.is_chat_model = is_chat_model
super().__init__(
chat_prompt_template=chat_prompt_template,
run_prompt_template=run_prompt_template,
additional_tools=additional_tools,
)
def generate_many(self, prompts, stop):
if self.is_chat_model:
return [self._chat_generate(prompt, stop) for prompt in prompts]
else:
return self._completion_generate(prompts, stop)
def generate_one(self, prompt, stop):
if self.is_chat_model:
return self._chat_generate(prompt, stop)
else:
return self._completion_generate([prompt], stop)[0]
def _chat_generate(self, prompt, stop):
result = openai.ChatCompletion.create(
engine=self.deployment_id,
messages=[{"role": "user", "content": prompt}],
temperature=0,
stop=stop,
)
return result["choices"][0]["message"]["content"]
def _completion_generate(self, prompts, stop):
result = openai.Completion.create(
engine=self.deployment_id,
prompt=prompts,
temperature=0,
stop=stop,
max_tokens=200,
)
return [answer["text"] for answer in result["choices"]]

@ -1,110 +0,0 @@
import os
import logging
from dataclasses import dataclass
from swarms.models.dalle3 import Dalle
from swarms.models import OpenAIChat
@dataclass
class Idea2Image:
"""
A class used to generate images from text prompts using DALLE-3.
...
Attributes
----------
image : str
Text prompt for the image to generate
openai_api_key : str
OpenAI API key
cookie : str
Cookie value for DALLE-3
output_folder : str
Folder to save the generated images
Methods
-------
llm_prompt():
Returns a prompt for refining the image generation
generate_image():
Generates and downloads the image based on the prompt
Usage:
------
from dalle3 import Idea2Image
idea2image = Idea2Image(
image="Fish hivemind swarm in light blue avatar anime in zen garden pond concept art anime art, happy fish, anime scenery"
)
idea2image.run()
"""
image: str
openai_api_key: str = os.getenv("OPENAI_API_KEY") or None
cookie: str = os.getenv("BING_COOKIE") or None
output_folder: str = "images/"
def __post_init__(self):
self.llm = OpenAIChat(openai_api_key=self.openai_api_key)
self.dalle = Dalle(self.cookie)
def llm_prompt(self):
LLM_PROMPT = f"""
Refine the USER prompt to create a more precise image tailored to the user's needs using
an image generator like DALLE-3.
###### FOLLOW THE GUIDE BELOW TO REFINE THE PROMPT ######
- Use natural language prompts up to 400 characters to describe the image you want to generate. Be as specific or vague as needed.
- Frame your photographic prompts like camera position, lighting, film type, year, usage context. This implicitly suggests image qualities.
- For illustrations, you can borrow photographic terms like "close up" and prompt for media, style, artist, animation style, etc.
- Prompt hack: name a film/TV show genre + year to "steal the look" for costumes, lighting, etc without knowing technical details.
- Try variations of a prompt, make edits, and do recursive uncropping to create interesting journeys and zoom-out effects.
- Use an image editor like Photopea to uncrop DALL-E outputs and prompt again to extend the image.
- Combine separate DALL-E outputs into panoramas and murals with careful positioning/editing.
- Browse communities like Reddit r/dalle2 to get inspired and share your creations. See tools, free image resources, articles.
- Focus prompts on size, structure, shape, mood, aesthetics to influence the overall vibe and composition.
- Be more vague or detailed as needed - DALL-E has studied over 400M images and can riff creatively or replicate specific styles.
- Be descriptive, describe the art style at the end like fusing concept art with anime art or game art or product design art.
###### END OF GUIDE ######
Prompt to refine: {self.image}
"""
return LLM_PROMPT
def run(self):
"""
Generates and downloads the image based on the prompt.
This method refines the prompt using the llm, opens the website with the query,
gets the image URLs, and downloads the images to the specified folder.
"""
# Set up logging
logging.basicConfig(level=logging.INFO)
# Refine the prompt using the llm
image = self.llm_prompt()
refined_prompt = self.llm(image)
print(f"Refined prompt: {refined_prompt}")
# Open the website with your query
self.dalle.create(refined_prompt)
# Get the image URLs
urls = self.dalle.get_urls()
# Download the images to your specified folder
self.dalle.download(urls, self.output_folder)

@ -1,158 +0,0 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
class MetaPrompterAgent:
"""
Meta Prompting Agent
The Meta Prompting Agent has 1 purpose: to create better prompts for an agent.
The meta prompting agent would be used in this flow:
user task -> MetaPrompterAgent -> Agent
Args:
llm (BaseLanguageModel): Language Model
max_iters (int, optional): Maximum number of iterations. Defaults to 3.
max_meta_iters (int, optional): Maximum number of meta iterations. Defaults to 5.
failed_phrase (str, optional): Phrase to indicate failure. Defaults to "task failed".
success_phrase (str, optional): Phrase to indicate success. Defaults to "task succeeded".
instructions (str, optional): Instructions to be used in the meta prompt. Defaults to "None".
template (str, optional): Template to be used in the meta prompt. Defaults to None.
memory (ConversationBufferWindowMemory, optional): Memory to be used in the meta prompt. Defaults to None.
meta_template (str, optional): Template to be used in the meta prompt. Defaults to None.
human_input (bool, optional): Whether to use human input. Defaults to False.
Returns:
str: Response from the agent
Usage:
--------------
from swarms.workers import Worker
from swarms.agents.meta_prompter import MetaPrompterAgent
from langchain.llms import OpenAI
#init llm
llm = OpenAI()
#init the meta prompter agent that optimized prompts
meta_optimizer = MetaPrompterAgent(llm=llm)
#init the worker agent
worker = Worker(llm)
#broad task to complete
task = "Create a feedforward in pytorch"
#optimize the prompt
optimized_prompt = meta_optimizer.run(task)
#run the optimized prompt with detailed instructions
result = worker.run(optimized_prompt)
print(result)
"""
def __init__(
self,
llm,
max_iters: int = 3,
max_meta_iters: int = 5,
failed_phrase: str = "task failed",
success_phrase: str = "task succeeded",
instructions: str = "None",
template: str = None,
memory=None,
meta_template: str = None,
human_input: bool = False,
):
self.llm = llm
self.max_iters = max_iters
self.max_meta_iters = max_meta_iters
self.failed_phrase = failed_phrase
self.success_phrase = success_phrase
self.instructions = instructions
self.template = template
self.memory = memory
self.meta_template = meta_template
self.human_input = human_input
if memory is None:
memory = ConversationBufferWindowMemory()
memory.ai_prefix = "Assistant:"
template = f"""
Instructions: {self.instructions}
{{{memory.memory_key}}}
Human: {{human_input}}
Assistant:
"""
prompt = PromptTemplate(
input_variables=["history", "human_input"], template=template
)
self.chain = LLMChain(
llm=self.llm(),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(),
)
def get_chat_history(self, chain_memory):
"""Get Chat History from the memory"""
memory_key = chain_memory.memory_key
chat_history = chain_memory.load_memory_variables(memory_key)[memory_key]
return chat_history
def get_new_instructions(self, meta_output):
"""Get New Instructions from the meta_output"""
delimiter = "Instructions: "
new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
return new_instructions
def run(self, task: str):
"""
Run the MetaPrompterAgent
Args:
task (str): The task to be completed
Returns:
str: The response from the agent
"""
key_phrases = [self.success_phrase, self.failed_phrase]
for i in range(self.max_meta_iters):
print(f"[Epsisode: {i+1}/{self.max_meta_iters}]")
chain = self.chain(memory=None)
output = chain.predict(human_input=task)
for j in range(self.max_iters):
print(f"(Step {j+1}/{self.max_iters})")
print(f"Assistant: {output}")
print("Human: ")
if self.human_input:
human_input = input()
if any(phrase in human_input.lower() for phrase in key_phrases):
break
output = chain.predict(human_input=human_input.lower())
if self.success_phrase in human_input.lower():
print("You succeed! Thanks for using!")
return
meta_chain = self.initialize_meta_chain()
meta_output = meta_chain.predict(
chat_history=self.get_chat_history(chain.memory)
)
print(f"Feedback: {meta_output}")
self.instructions = self.get_new_instructions(meta_output)
print(f"New Instruction: {self.instructions}")
print("\n" + "#" * 80 + "\n")

File diff suppressed because it is too large.

@ -1,12 +0,0 @@
"""The Replicator"""
class Replicator:
def __init__(
self,
model_name,
):
pass
def run(self, task):
pass

@ -1,9 +0,0 @@
class PromptRefiner:
def __init__(self, system_prompt: str, llm):
super().__init__()
self.system_prompt = system_prompt
self.llm = llm
def run(self, task: str):
refine = self.llm(f"System Prompt: {self.system_prompt} Current task: {task}")
return refine
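
The removed PromptRefiner simply prepends its system prompt to the task and passes the combined string to a callable LLM. A minimal usage sketch follows, with a lambda standing in for the real LLM callable.

# Usage sketch for the removed PromptRefiner; the lambda stands in for a
# real LLM callable that maps a prompt string to a completion string.
class PromptRefiner:
    def __init__(self, system_prompt: str, llm):
        self.system_prompt = system_prompt
        self.llm = llm

    def run(self, task: str) -> str:
        return self.llm(f"System Prompt: {self.system_prompt} Current task: {task}")


llm = lambda prompt: f"(refined) {prompt}"
refiner = PromptRefiner("You rewrite tasks into precise prompts.", llm)
print(refiner.run("build a CLI snake game"))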

@ -1,37 +0,0 @@
from termcolor import colored
class SimpleAgent:
"""
SimpleAgent is a minimal agent that wraps a Flow and runs tasks through it.
Args:
name (str): Name of the agent
flow (Flow): Flow to run
Example:
>>> from swarms.agents.simple_agent import SimpleAgent
>>> from swarms.structs import Flow
>>> from swarms.models import OpenAIChat
>>> api_key = ""
>>> llm = OpenAIChat()
"""
def __init__(
self,
name: str,
flow,
):
self.name = name
self.flow = flow
self.message_history = []
def run(self, task: str) -> str:
"""Run method"""
print(colored(f"Agent {self.name} is running task: {task}", "red"))
response = self.flow.run(task)
self.message_history.append((self.name, response))
return response

@ -17,7 +17,6 @@ from dataclasses import dataclass
from typing import List, Optional, Callable
from termcolor import colored
import os
import sys
@dataclass

@ -3,7 +3,7 @@ from swarms.models.anthropic import Anthropic
from swarms.models.petals import Petals
from swarms.models.mistral import Mistral
# from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
from swarms.models.zephyr import Zephyr
from swarms.models.biogpt import BioGPT
from swarms.models.huggingface import HuggingfaceLLM
@ -31,9 +31,9 @@ __all__ = [
"Anthropic",
"Petals",
"Mistral",
# "OpenAI",
# "AzureOpenAI",
# "OpenAIChat",
"OpenAI",
"AzureOpenAI",
"OpenAIChat",
"Zephyr",
"Idefics",
"Kosmos",
@ -44,7 +44,7 @@ __all__ = [
"HuggingfaceLLM",
"MPT7B",
"WizardLLMStoryTeller",
# "GPT4Vision",
# "Dalle3",
# "Fuyu",
"GPT4Vision",
"Dalle3",
"Fuyu",
]

@ -52,7 +52,7 @@ class AbstractModel(ABC):
def _time_for_generation(self, task: str) -> float:
"""Time for Generation"""
self.start_time = time.time()
output = self.run(task)
self.run(task)
self.end_time = time.time()
return self.end_time - self.start_time

@ -4,7 +4,7 @@ import os
import uuid
from dataclasses import dataclass
from io import BytesIO
from typing import List, Optional
from typing import List
import backoff
import openai

@ -121,7 +121,7 @@ class DistilWhisperModel:
# Load the whole audio file, but process and transcribe it in chunks
audio_input = self.processor.audio_file_to_array(audio_file_path)
sample_rate = audio_input.sampling_rate
total_duration = len(audio_input.array) / sample_rate
len(audio_input.array) / sample_rate
chunks = [
audio_input.array[i : i + sample_rate * chunk_duration]
for i in range(

@ -2,7 +2,6 @@ import json
import os
from typing import List
import numpy as np
import timm
import torch
from PIL import Image

@ -1,6 +1,5 @@
from typing import List, Tuple
import numpy as np
from PIL import Image
from pydantic import BaseModel, root_validator, validator
from transformers import AutoModelForVision2Seq, AutoProcessor

@ -1,4 +1,3 @@
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging

@ -2,7 +2,7 @@ from typing import List
import timm
import torch
from pydantic import BaseModel, conlist
from pydantic import BaseModel
class TimmModelInfo(BaseModel):

@ -4,9 +4,6 @@ TROCR for Multi-Modal OCR tasks
"""
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image
import requests
class TrOCR:

@ -4,3 +4,14 @@ from swarms.prompts.growth_agent_prompt import GROWTH_AGENT_PROMPT
from swarms.prompts.legal_agent_prompt import LEGAL_AGENT_PROMPT
from swarms.prompts.operations_agent_prompt import OPERATIONS_AGENT_PROMPT
from swarms.prompts.product_agent_prompt import PRODUCT_AGENT_PROMPT
__all__ = [
"CODE_INTERPRETER",
"FINANCE_AGENT_PROMPT",
"GROWTH_AGENT_PROMPT",
"LEGAL_AGENT_PROMPT",
"OPERATIONS_AGENT_PROMPT",
"PRODUCT_AGENT_PROMPT",
]

@ -1,4 +1,4 @@
AUTOBLOG_GEN_GENERATOR = f"""
AUTOBLOG_GEN_GENERATOR = """
First search for a list of topics on the web based their relevance to Positive Med's long term vision then rank than based on the goals this month, then output a single headline title for a blog for the next autonomous agent to write the blog, utilize the SOP below to help you strategically select topics. Output a single topic that will be the foundation for a blog.
@ -238,7 +238,7 @@ Denote the social media's by using the social media name in HTML like tags
# Agent that generates blogs
DRAFT_AGENT_SYSTEM_PROMPT = f"""
DRAFT_AGENT_SYSTEM_PROMPT = """
Write a 5,000+ word long narrative essay on the highest rated topic from a list of topics for positivemed.com,
their vision is: to democratize health wisdom to modern young professionals in a healthy and conversational and friendly manner,

@ -1,9 +1,8 @@
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Dict, List, Sequence
from typing import Dict, List, Sequence
from pydantic import Field
class Message:

@ -19,7 +19,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple
from termcolor import colored
import inspect
import random
from swarms.tools.tool import BaseTool
# Prompts
DYNAMIC_STOP_PROMPT = """
@ -299,7 +298,7 @@ class Flow:
model_config = self.get_llm_init_params()
print(colored("Initializing Agent Dashboard...", "yellow"))
dashboard = print(
print(
colored(
f"""
Flow Dashboard

@ -1,5 +1,3 @@
import concurrent.futures
import os
from termcolor import colored

@ -1,4 +1,3 @@
from typing import List
class DialogueSimulator:

@ -1,4 +1,3 @@
from typing import List, Callable
# Define a selection function

@ -2,3 +2,10 @@ from swarms.utils.display_markdown import display_markdown_message
from swarms.utils.futures import execute_futures_dict
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.parse_code import extract_code_in_backticks_in_string
__all__ = [
"display_markdown_message",
"execute_futures_dict",
"SubprocessCodeInterpreter",
"extract_code_in_backticks_in_string",
]

@ -1,2 +1 @@
# from swarms.workers.worker import Worker
from swarms.workers.base import AbstractWorker

@ -2,9 +2,7 @@ import os
from unittest.mock import Mock, patch
import pytest
from cohere.models.response import GenerationChunk
from dotenv import load_dotenv
from swarms.models.cohere_chat import BaseCohere, Cohere
# Load the environment variables
@ -17,6 +15,66 @@ def cohere_instance():
return Cohere(cohere_api_key=api_key)
def test_cohere_custom_configuration(cohere_instance):
# Test customizing Cohere configurations
cohere_instance.model = "base"
cohere_instance.temperature = 0.5
cohere_instance.max_tokens = 100
cohere_instance.k = 1
cohere_instance.p = 0.8
cohere_instance.frequency_penalty = 0.2
cohere_instance.presence_penalty = 0.4
response = cohere_instance("Customize configurations.")
assert isinstance(response, str)
def test_cohere_api_error_handling(cohere_instance):
# Test error handling when the API key is invalid
cohere_instance.model = "base"
cohere_instance.cohere_api_key = "invalid-api-key"
with pytest.raises(Exception):
cohere_instance("Error handling with invalid API key.")
def test_cohere_async_api_error_handling(cohere_instance):
# Test async error handling when the API key is invalid
cohere_instance.model = "base"
cohere_instance.cohere_api_key = "invalid-api-key"
with pytest.raises(Exception):
cohere_instance.async_call("Error handling with invalid API key.")
def test_cohere_stream_api_error_handling(cohere_instance):
# Test error handling in streaming mode when the API key is invalid
cohere_instance.model = "base"
cohere_instance.cohere_api_key = "invalid-api-key"
with pytest.raises(Exception):
generator = cohere_instance.stream("Error handling with invalid API key.")
for token in generator:
pass
def test_cohere_streaming_mode(cohere_instance):
# Test the streaming mode for large text generation
cohere_instance.model = "base"
cohere_instance.streaming = True
prompt = "Generate a lengthy text using streaming mode."
generator = cohere_instance.stream(prompt)
for token in generator:
assert isinstance(token, str)
def test_cohere_streaming_mode_async(cohere_instance):
# Test the async streaming mode for large text generation
cohere_instance.model = "base"
cohere_instance.streaming = True
prompt = "Generate a lengthy text using async streaming mode."
async_generator = cohere_instance.async_stream(prompt)
for token in async_generator:
assert isinstance(token, str)
def test_cohere_wrap_prompt(cohere_instance):
prompt = "What is the meaning of life?"
wrapped_prompt = cohere_instance._wrap_prompt(prompt)
@ -210,7 +268,7 @@ def test_cohere_call_with_embed_multilingual_v3_model(cohere_instance):
def test_cohere_call_with_invalid_model(cohere_instance):
cohere_instance.model = "invalid-model"
with pytest.raises(ValueError):
response = cohere_instance("Translate to French.")
cohere_instance("Translate to French.")
def test_cohere_call_with_long_prompt(cohere_instance):
@ -223,7 +281,7 @@ def test_cohere_call_with_max_tokens_limit_exceeded(cohere_instance):
cohere_instance.max_tokens = 10
prompt = "This is a test prompt that will exceed the max tokens limit."
with pytest.raises(ValueError):
response = cohere_instance(prompt)
cohere_instance(prompt)
def test_cohere_stream_with_command_model(cohere_instance):
@ -346,64 +404,6 @@ def test_cohere_async_stream_with_embed_multilingual_v3_model(cohere_instance):
assert isinstance(token, str)
def test_cohere_custom_configuration(cohere_instance):
# Test customizing Cohere configurations
cohere_instance.model = "base"
cohere_instance.temperature = 0.5
cohere_instance.max_tokens = 100
cohere_instance.k = 1
cohere_instance.p = 0.8
cohere_instance.frequency_penalty = 0.2
cohere_instance.presence_penalty = 0.4
response = cohere_instance("Customize configurations.")
assert isinstance(response, str)
def test_cohere_api_error_handling(cohere_instance):
# Test error handling when the API key is invalid
cohere_instance.model = "base"
cohere_instance.cohere_api_key = "invalid-api-key"
with pytest.raises(Exception):
response = cohere_instance("Error handling with invalid API key.")
def test_cohere_async_api_error_handling(cohere_instance):
# Test async error handling when the API key is invalid
cohere_instance.model = "base"
cohere_instance.cohere_api_key = "invalid-api-key"
with pytest.raises(Exception):
response = cohere_instance.async_call("Error handling with invalid API key.")
def test_cohere_stream_api_error_handling(cohere_instance):
# Test error handling in streaming mode when the API key is invalid
cohere_instance.model = "base"
cohere_instance.cohere_api_key = "invalid-api-key"
with pytest.raises(Exception):
generator = cohere_instance.stream("Error handling with invalid API key.")
for token in generator:
pass
def test_cohere_streaming_mode(cohere_instance):
# Test the streaming mode for large text generation
cohere_instance.model = "base"
cohere_instance.streaming = True
prompt = "Generate a lengthy text using streaming mode."
generator = cohere_instance.stream(prompt)
for token in generator:
assert isinstance(token, str)
def test_cohere_streaming_mode_async(cohere_instance):
# Test the async streaming mode for large text generation
cohere_instance.model = "base"
cohere_instance.streaming = True
prompt = "Generate a lengthy text using async streaming mode."
async_generator = cohere_instance.async_stream(prompt)
for token in async_generator:
assert isinstance(token, str)
def test_cohere_representation_model_embedding(cohere_instance):
# Test using the Representation model for text embedding
@ -435,7 +435,7 @@ def test_cohere_representation_model_max_tokens_limit_exceeded(cohere_instance):
cohere_instance.max_tokens = 10
prompt = "This is a test prompt that will exceed the max tokens limit."
with pytest.raises(ValueError):
embedding = cohere_instance.embed(prompt)
cohere_instance.embed(prompt)
# Add more production-grade test cases based on real-world scenarios
@ -475,7 +475,7 @@ def test_cohere_representation_model_multilingual_max_tokens_limit_exceeded(
cohere_instance.max_tokens = 10
prompt = "This is a test prompt that will exceed the max tokens limit for multilingual model."
with pytest.raises(ValueError):
embedding = cohere_instance.embed(prompt)
cohere_instance.embed(prompt)
def test_cohere_representation_model_multilingual_light_embedding(cohere_instance):
@ -514,7 +514,7 @@ def test_cohere_representation_model_multilingual_light_max_tokens_limit_exceede
cohere_instance.max_tokens = 10
prompt = "This is a test prompt that will exceed the max tokens limit for multilingual light model."
with pytest.raises(ValueError):
embedding = cohere_instance.embed(prompt)
cohere_instance.embed(prompt)
def test_cohere_command_light_model(cohere_instance):
@ -570,7 +570,7 @@ def test_cohere_representation_model_english_max_tokens_limit_exceeded(cohere_in
"This is a test prompt that will exceed the max tokens limit for English model."
)
with pytest.raises(ValueError):
embedding = cohere_instance.embed(prompt)
cohere_instance.embed(prompt)
def test_cohere_representation_model_english_light_embedding(cohere_instance):
@ -607,7 +607,7 @@ def test_cohere_representation_model_english_light_max_tokens_limit_exceeded(
cohere_instance.max_tokens = 10
prompt = "This is a test prompt that will exceed the max tokens limit for English light model."
with pytest.raises(ValueError):
embedding = cohere_instance.embed(prompt)
cohere_instance.embed(prompt)
def test_cohere_command_model(cohere_instance):
@ -624,18 +624,7 @@ def test_cohere_invalid_model(cohere_instance):
# Test using an invalid model name
cohere_instance.model = "invalid-model"
with pytest.raises(ValueError):
response = cohere_instance("Generate text using an invalid model.")
def test_cohere_streaming_generation(cohere_instance):
# Test streaming generation with the Command model
cohere_instance.model = "command"
prompt = "Generate text using streaming."
chunks = list(cohere_instance.stream(prompt))
assert isinstance(chunks, list)
assert len(chunks) > 0
assert all(isinstance(chunk, GenerationChunk) for chunk in chunks)
cohere_instance("Generate text using an invalid model.")
def test_cohere_base_model_generation_with_max_tokens(cohere_instance):
# Test generating text using the base model with a specified max_tokens limit
