Former-commit-id: 07bcd22eef
grit/923f7c6f-0958-480b-8748-ea6bbf1c2084
2.1.9
parent bf9a747fa3
commit 4e2ce705aa
@@ -1,29 +0,0 @@
import os
import sys

from dotenv import load_dotenv

from swarms.models.revgptV4 import RevChatGPTModelv4
from swarms.models.revgptV1 import RevChatGPTModelv1

root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_dir)

load_dotenv()

config = {
    "model": os.getenv("REVGPT_MODEL"),
    "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
    "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
    "PUID": os.getenv("REVGPT_PUID"),
    "unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")],
}

# For v1 model
model = RevChatGPTModelv1(access_token=os.getenv("ACCESS_TOKEN"), **config)
# model = RevChatGPTModelv4(access_token=os.getenv("ACCESS_TOKEN"), **config)

# For v3 model
# model = RevChatGPTModel(access_token=os.getenv("OPENAI_API_KEY"), **config)

task = "Write a cli snake game"
response = model.run(task)
print(response)
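# A minimal sketch of the .env this script expects via load_dotenv(); the
# variable names come from the os.getenv calls above, and every value below is
# a placeholder rather than a real credential.
#
#   ACCESS_TOKEN=<RevChatGPT access token>
#   REVGPT_MODEL=gpt-4
#   REVGPT_PLUGIN_IDS=<plugin id>
#   REVGPT_DISABLE_HISTORY=True
#   REVGPT_PUID=<PUID cookie value>
#   REVGPT_UNVERIFIED_PLUGIN_DOMAINS=example.com
#
# Note that plugin_ids and unverified_plugin_domains each end up as a
# single-element list in the config, so each env var holds one value.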
@@ -1,433 +0,0 @@
from __future__ import annotations

import json
import time
from typing import Any, Callable, List, Optional

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ChatMessageHistory
from langchain.prompts.chat import (
    BaseChatPromptTemplate,
)
from langchain.schema import (
    BaseChatMessageHistory,
    Document,
)
from langchain.schema.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
)
from langchain.schema.vectorstore import VectorStoreRetriever
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
    BaseAutoGPTOutputParser,
)
from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import (
    FINISH_NAME,
    get_prompt,
)
from langchain_experimental.pydantic_v1 import BaseModel, ValidationError

# PROMPT
FINISH_NAME = "finish"


# This class has a metaclass conflict: both `BaseChatPromptTemplate` and `BaseModel`
# define a metaclass to use, and the two metaclasses attempt to define
# the same functions but in mutually-incompatible ways.
# It isn't clear how to resolve this, and this code predates mypy
# beginning to perform that check.
#
# Mypy errors:
# ```
# Definition of "__private_attributes__" in base class "BaseModel" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__repr_name__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__pretty__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__repr_str__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Definition of "__rich_repr__" in base class "Representation" is
# incompatible with definition in base class "BaseModel"  [misc]
# Metaclass conflict: the metaclass of a derived class must be
# a (non-strict) subclass of the metaclasses of all its bases  [misc]
# ```
#
# TODO: look into refactoring this class in a way that avoids the mypy type errors
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):  # type: ignore[misc]
    """Prompt for AutoGPT."""

    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    token_counter: Callable[[str], int]
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        prompt_start = (
            "Your decisions must always be made independently "
            "without seeking user assistance.\n"
            "Play to your strengths as an LLM and pursue simple "
            "strategies with no legal complications.\n"
            "If you have completed all your tasks, make sure to "
            'use the "finish" command.'
        )
        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
            time_prompt.content
        )
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            [self.token_counter(doc) for doc in relevant_memory]
        )
        while used_tokens + relevant_memory_tokens > 2500:
            relevant_memory = relevant_memory[:-1]
            relevant_memory_tokens = sum(
                [self.token_counter(doc) for doc in relevant_memory]
            )
        content_format = (
            f"This reminds you of these events from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += self.token_counter(memory_message.content)
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
            used_tokens += message_tokens
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages


class PromptGenerator:
    """A class for generating custom prompt strings.

    Does this based on constraints, commands, resources, and performance evaluations.
    """

    def __init__(self) -> None:
        """Initialize the PromptGenerator object.

        Starts with empty lists of constraints, commands, resources,
        and performance evaluations.
        """
        self.constraints: List[str] = []
        self.commands: List[BaseTool] = []
        self.resources: List[str] = []
        self.performance_evaluation: List[str] = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user",
            },
            "command": {"name": "command name", "args": {"arg name": "value"}},
        }

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool) -> None:
        self.commands.append(tool)

    def _generate_command_string(self, tool: BaseTool) -> str:
        output = f"{tool.name}: {tool.description}"
        output += f", args json schema: {json.dumps(tool.args)}"
        return output

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = [
                f"{i + 1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            ]
            finish_description = (
                "use this to signal that you have finished all your objectives"
            )
            finish_args = (
                '"response": "final response to let '
                'people know you have finished your objectives"'
            )
            finish_string = (
                f"{len(items) + 1}. {FINISH_NAME}: "
                f"{finish_description}, args: {finish_args}"
            )
            return "\n".join(command_strings + [finish_string])
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self) -> str:
        """Generate a prompt string.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            "Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            "Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            "You should only respond in JSON format as described below "
            f"\nResponse Format: \n{formatted_response_format} "
            "\nEnsure the response can be parsed by Python json.loads"
        )

        return prompt_string


def get_prompt(tools: List[BaseTool]) -> str:
    """Generates a prompt string.

    It includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~16000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the PromptGenerator object
    for tool in tools:
        prompt_generator.add_tool(tool)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string


class AutoGPT:
    """
    AutoGPT-style autonomous agent: repeatedly prompts the LLM chain, parses
    each reply into a tool invocation, executes it, and feeds the result back
    through chat history and vector-store memory.

    Args:
        ai_name (str): Name of the agent.
        memory (VectorStoreRetriever): Retriever used as long-term memory.
        chain (LLMChain): Chain that generates the assistant replies.
        output_parser (BaseAutoGPTOutputParser): Parses replies into actions.
        tools (List[BaseTool]): Tools the agent is allowed to call.
        feedback_tool (Optional[HumanInputRun]): Optional human-in-the-loop input tool.
        chat_history_memory (Optional[BaseChatMessageHistory]): Store for the chat history.
    """

    def __init__(
        self,
        ai_name: str,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: BaseAutoGPTOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
        chat_history_memory: Optional[BaseChatMessageHistory] = None,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.next_action_count = 0
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool
        self.chat_history_memory = chat_history_memory or ChatMessageHistory()

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[BaseAutoGPTOutputParser] = None,
        chat_history_memory: Optional[BaseChatMessageHistory] = None,
    ) -> AutoGPT:
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "goals", "user_input"],
            token_counter=llm.get_num_tokens,
        )
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            output_parser or AutoGPTOutputParser(),
            tools,
            feedback_tool=human_feedback_tool,
            chat_history_memory=chat_history_memory,
        )

    def run(self, goals: List[str]) -> str:
        user_input = (
            "Determine which next command to use, "
            "and respond using the format specified above:"
        )
        # Interaction Loop
        loop_count = 0
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1

            # Send message to AI, get response
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.chat_history_memory.messages,
                memory=self.memory,
                user_input=user_input,
            )

            # Print Assistant thoughts
            print(assistant_reply)
            self.chat_history_memory.add_message(HumanMessage(content=user_input))
            self.chat_history_memory.add_message(AIMessage(content=assistant_reply))

            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}
            if action.name == FINISH_NAME:
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    observation = tool.run(action.args)
                except ValidationError as e:
                    observation = (
                        f"Validation Error in args: {str(e)}, args: {action.args}"
                    )
                except Exception as e:
                    observation = (
                        f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
                    )
                result = f"Command {tool.name} returned: {observation}"
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
            else:
                result = (
                    f"Unknown command '{action.name}'. "
                    "Please refer to the 'COMMANDS' list for available "
                    "commands and only respond in the specified JSON format."
                )

            memory_to_add = f"Assistant Reply: {assistant_reply} \nResult: {result} "
            if self.feedback_tool is not None:
                feedback = f"\n{self.feedback_tool.run('Input: ')}"
                if feedback in {"q", "stop"}:
                    print("EXITING")
                    return "EXITING"
                memory_to_add += feedback

            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.chat_history_memory.add_message(SystemMessage(content=result))
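# A minimal usage sketch for the AutoGPT class above, assuming an OpenAI chat
# model and a FAISS-backed retriever; any VectorStoreRetriever and tool list
# would work, and the file tools here are illustrative only.
import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # 1536 is the OpenAI embedding dimension
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

agent = AutoGPT.from_llm_and_tools(
    ai_name="Assistant",
    ai_role="a helpful research agent",
    tools=[WriteFileTool(), ReadFileTool()],
    llm=ChatOpenAI(temperature=0),
    memory=vectorstore.as_retriever(),
)
agent.run(["Write a short weather report for San Francisco"])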
@@ -1,4 +0,0 @@
"""
Companion agents converse with the user about the agent the user wants to create,
then create that agent with the desired attributes, traits, tools, and configurations.
"""
@@ -1,599 +0,0 @@
import importlib.util
import json
import os
from dataclasses import dataclass
from typing import Dict

from huggingface_hub import hf_hub_download, list_spaces
from transformers.tools.base import (
    TASK_MAPPING,
    TOOL_CONFIG_FILE,
    Tool,
    load_tool,
    supports_remote,
)
from transformers.tools.prompts import CHAT_MESSAGE_PROMPT, download_prompt
from transformers.tools.python_interpreter import evaluate
from transformers.utils import is_offline_mode, is_openai_available, logging

# utils
logger = logging.get_logger(__name__)

if is_openai_available():
    import openai
else:
    StoppingCriteria = object

_tools_are_initialized = False

BASE_PYTHON_TOOLS = {
    "print": print,
    "range": range,
    "float": float,
    "int": int,
    "bool": bool,
    "str": str,
}


@dataclass
class PreTool:
    task: str
    description: str
    repo_id: str


HUGGINGFACE_DEFAULT_TOOLS = {}

HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [
    "image-transformation",
    "text-download",
    "text-to-image",
    "text-to-video",
]


def get_remote_tools(organization="huggingface-tools"):
    if is_offline_mode():
        logger.info("You are in offline mode, so remote tools are not available.")
        return {}

    spaces = list_spaces(author=organization)
    tools = {}
    for space_info in spaces:
        repo_id = space_info.id
        resolved_config_file = hf_hub_download(
            repo_id, TOOL_CONFIG_FILE, repo_type="space"
        )
        with open(resolved_config_file, encoding="utf-8") as reader:
            config = json.load(reader)

        task = repo_id.split("/")[-1]
        tools[config["name"]] = PreTool(
            task=task, description=config["description"], repo_id=repo_id
        )

    return tools


def _setup_default_tools():
    global HUGGINGFACE_DEFAULT_TOOLS
    global _tools_are_initialized

    if _tools_are_initialized:
        return

    main_module = importlib.import_module("transformers")
    tools_module = main_module.tools

    remote_tools = get_remote_tools()
    for task_name, tool_class_name in TASK_MAPPING.items():
        tool_class = getattr(tools_module, tool_class_name)
        description = tool_class.description
        HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(
            task=task_name, description=description, repo_id=None
        )

    if not is_offline_mode():
        for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB:
            found = False
            for tool_name, tool in remote_tools.items():
                if tool.task == task_name:
                    HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool
                    found = True
                    break

            if not found:
                raise ValueError(f"{task_name} is not implemented on the Hub.")

    _tools_are_initialized = True


def resolve_tools(code, toolbox, remote=False, cached_tools=None):
    if cached_tools is None:
        resolved_tools = BASE_PYTHON_TOOLS.copy()
    else:
        resolved_tools = cached_tools
    for name, tool in toolbox.items():
        if name not in code or name in resolved_tools:
            continue

        if isinstance(tool, Tool):
            resolved_tools[name] = tool
        else:
            task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id
            _remote = remote and supports_remote(task_or_repo_id)
            resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote)

    return resolved_tools


def get_tool_creation_code(code, toolbox, remote=False):
    code_lines = ["from transformers import load_tool", ""]
    for name, tool in toolbox.items():
        if name not in code or isinstance(tool, Tool):
            continue

        task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id
        line = f'{name} = load_tool("{task_or_repo_id}"'
        if remote:
            line += ", remote=True"
        line += ")"
        code_lines.append(line)

    return "\n".join(code_lines) + "\n"


def clean_code_for_chat(result):
    lines = result.split("\n")
    idx = 0
    while idx < len(lines) and not lines[idx].lstrip().startswith("```"):
        idx += 1
    explanation = "\n".join(lines[:idx]).strip()
    if idx == len(lines):
        return explanation, None

    idx += 1
    start_idx = idx
    while not lines[idx].lstrip().startswith("```"):
        idx += 1
    code = "\n".join(lines[start_idx:idx]).strip()

    return explanation, code


def clean_code_for_run(result):
    result = f"I will use the following {result}"
    explanation, code = result.split("Answer:")
    explanation = explanation.strip()
    code = code.strip()

    code_lines = code.split("\n")
    if code_lines[0] in ["```", "```py", "```python"]:
        code_lines = code_lines[1:]
    if code_lines[-1] == "```":
        code_lines = code_lines[:-1]
    code = "\n".join(code_lines)

    return explanation, code


class Agent:
    """
    Base class for all agents which contains the main API methods.

    Args:
        chat_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `chat_prompt_template.txt` in this repo in this case.
        run_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `run` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `run_prompt_template.txt` in this repo in this case.
        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
            one of the default tools, that default tool will be overridden.
    """

    def __init__(
        self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None
    ):
        _setup_default_tools()

        agent_name = self.__class__.__name__
        self.chat_prompt_template = download_prompt(
            chat_prompt_template, agent_name, mode="chat"
        )
        self.run_prompt_template = download_prompt(
            run_prompt_template, agent_name, mode="run"
        )
        self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy()
        self.log = print
        if additional_tools is not None:
            if isinstance(additional_tools, (list, tuple)):
                additional_tools = {t.name: t for t in additional_tools}
            elif not isinstance(additional_tools, dict):
                additional_tools = {additional_tools.name: additional_tools}

            replacements = {
                name: tool
                for name, tool in additional_tools.items()
                if name in HUGGINGFACE_DEFAULT_TOOLS
            }
            self._toolbox.update(additional_tools)
            if len(replacements) > 1:
                names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()])
                logger.warning(
                    "The following tools have been replaced by the ones provided in"
                    f" `additional_tools`:\n{names}."
                )
            elif len(replacements) == 1:
                name = list(replacements.keys())[0]
                logger.warning(
                    f"{name} has been replaced by {replacements[name]} as provided in"
                    " `additional_tools`."
                )

        self.prepare_for_new_chat()

    @property
    def toolbox(self) -> Dict[str, Tool]:
        """Get all tools currently available to the agent."""
        return self._toolbox

    def format_prompt(self, task, chat_mode=False):
        description = "\n".join(
            [f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]
        )
        if chat_mode:
            if self.chat_history is None:
                prompt = self.chat_prompt_template.replace("<<all_tools>>", description)
            else:
                prompt = self.chat_history
            prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task)
        else:
            prompt = self.run_prompt_template.replace("<<all_tools>>", description)
            prompt = prompt.replace("<<prompt>>", task)
        return prompt

    def set_stream(self, streamer):
        """
        Set the function used to stream results (which is `print` by default).

        Args:
            streamer (`callable`): The function to call when streaming results from the LLM.
        """
        self.log = streamer

    def chat(self, task, *, return_code=False, remote=False, **kwargs):
        """
        Sends a new request to the agent in a chat. Will use the previous ones in its history.

        Args:
            task (`str`): The task to perform
            return_code (`bool`, *optional*, defaults to `False`):
                Whether to just return code and not evaluate it.
            remote (`bool`, *optional*, defaults to `False`):
                Whether or not to use remote tools (inference endpoints) instead of local ones.
            kwargs (additional keyword arguments, *optional*):
                Any keyword argument to send to the agent when evaluating the code.

        Example:

        ```py
        from transformers import HfAgent

        agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
        agent.chat("Draw me a picture of rivers and lakes")

        agent.chat("Transform the picture so that there is a rock in there")
        ```
        """
        prompt = self.format_prompt(task, chat_mode=True)
        result = self.generate_one(prompt, stop=["Human:", "====="])
        self.chat_history = prompt + result.strip() + "\n"
        explanation, code = clean_code_for_chat(result)

        self.log(f"==Explanation from the agent==\n{explanation}")

        if code is not None:
            self.log(f"\n\n==Code generated by the agent==\n{code}")
            if not return_code:
                self.log("\n\n==Result==")
                self.cached_tools = resolve_tools(
                    code, self.toolbox, remote=remote, cached_tools=self.cached_tools
                )
                self.chat_state.update(kwargs)
                return evaluate(
                    code, self.cached_tools, self.chat_state, chat_mode=True
                )
            else:
                tool_code = get_tool_creation_code(code, self.toolbox, remote=remote)
                return f"{tool_code}\n{code}"

    def prepare_for_new_chat(self):
        """
        Clears the history of prior calls to [`~Agent.chat`].
        """
        self.chat_history = None
        self.chat_state = {}
        self.cached_tools = None

    def run(self, task, *, return_code=False, remote=False, **kwargs):
        """
        Sends a request to the agent.

        Args:
            task (`str`): The task to perform
            return_code (`bool`, *optional*, defaults to `False`):
                Whether to just return code and not evaluate it.
            remote (`bool`, *optional*, defaults to `False`):
                Whether or not to use remote tools (inference endpoints) instead of local ones.
            kwargs (additional keyword arguments, *optional*):
                Any keyword argument to send to the agent when evaluating the code.

        Example:

        ```py
        from transformers import HfAgent

        agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
        agent.run("Draw me a picture of rivers and lakes")
        ```
        """
        prompt = self.format_prompt(task)
        result = self.generate_one(prompt, stop=["Task:"])
        explanation, code = clean_code_for_run(result)

        self.log(f"==Explanation from the agent==\n{explanation}")

        self.log(f"\n\n==Code generated by the agent==\n{code}")
        if not return_code:
            self.log("\n\n==Result==")
            self.cached_tools = resolve_tools(
                code, self.toolbox, remote=remote, cached_tools=self.cached_tools
            )
            return evaluate(code, self.cached_tools, state=kwargs.copy())
        else:
            tool_code = get_tool_creation_code(code, self.toolbox, remote=remote)
            return f"{tool_code}\n{code}"

    def generate_one(self, prompt, stop):
        # This is the method to implement in your custom agent.
        raise NotImplementedError

    def generate_many(self, prompts, stop):
        # Override if you have a way to do batch generation faster than one by one
        return [self.generate_one(prompt, stop) for prompt in prompts]


class HFAgent(Agent):
    """
    Agent that uses the OpenAI API to generate code.

    <Tip warning={true}>

    The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like
    `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version.

    </Tip>

    Args:
        model (`str`, *optional*, defaults to `"text-davinci-003"`):
            The name of the OpenAI model to use.
        api_key (`str`, *optional*):
            The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`.
        chat_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `chat_prompt_template.txt` in this repo in this case.
        run_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `run` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `run_prompt_template.txt` in this repo in this case.
        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
            one of the default tools, that default tool will be overridden.

    Example:

    ```py
    from swarms.agents.hf_agents import HFAgent

    agent = HFAgent(model="text-davinci-003", api_key="xxx")
    agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
    ```
    """

    def __init__(
        self,
        model="text-davinci-003",
        api_key=None,
        chat_prompt_template=None,
        run_prompt_template=None,
        additional_tools=None,
    ):
        if not is_openai_available():
            raise ImportError(
                "Using `HFAgent` requires `openai`: `pip install openai`."
            )

        if api_key is None:
            api_key = os.environ.get("OPENAI_API_KEY", None)
        if api_key is None:
            raise ValueError(
                "You need an OpenAI key to use `HFAgent`. You can get one here:"
                " https://openai.com/api/. If you have one, set it in your env with"
                " `os.environ['OPENAI_API_KEY'] = xxx`."
            )
        else:
            openai.api_key = api_key
        self.model = model
        super().__init__(
            chat_prompt_template=chat_prompt_template,
            run_prompt_template=run_prompt_template,
            additional_tools=additional_tools,
        )

    def generate_many(self, prompts, stop):
        if "gpt" in self.model:
            return [self._chat_generate(prompt, stop) for prompt in prompts]
        else:
            return self._completion_generate(prompts, stop)

    def generate_one(self, prompt, stop):
        if "gpt" in self.model:
            return self._chat_generate(prompt, stop)
        else:
            return self._completion_generate([prompt], stop)[0]

    def _chat_generate(self, prompt, stop):
        result = openai.ChatCompletion.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            temperature=0,
            stop=stop,
        )
        return result["choices"][0]["message"]["content"]

    def _completion_generate(self, prompts, stop):
        result = openai.Completion.create(
            model=self.model,
            prompt=prompts,
            temperature=0,
            stop=stop,
            max_tokens=200,
        )
        return [answer["text"] for answer in result["choices"]]


class AzureOpenAI(Agent):
    """
    Agent that uses Azure OpenAI to generate code. See the [official
    documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) to learn how to deploy an openAI
    model on Azure.

    <Tip warning={true}>

    The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like
    `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version.

    </Tip>

    Args:
        deployment_id (`str`):
            The name of the deployed Azure openAI model to use.
        api_key (`str`, *optional*):
            The API key to use. If unset, will look for the environment variable `"AZURE_OPENAI_API_KEY"`.
        resource_name (`str`, *optional*):
            The name of your Azure OpenAI Resource. If unset, will look for the environment variable
            `"AZURE_OPENAI_RESOURCE_NAME"`.
        api_version (`str`, *optional*, default to `"2022-12-01"`):
            The API version to use for this agent.
        is_chat_model (`bool`, *optional*):
            Whether you are using a completion model or a chat model (see note above, chat models won't be as
            efficient). Will default to `gpt` being in the `deployment_id` or not.
        chat_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `chat_prompt_template.txt` in this repo in this case.
        run_prompt_template (`str`, *optional*):
            Pass along your own prompt if you want to override the default template for the `run` method. Can be the
            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
            `run_prompt_template.txt` in this repo in this case.
        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
            one of the default tools, that default tool will be overridden.

    Example:

    ```py
    agent = AzureOpenAI(deployment_id="Davinci-003", api_key="xxx", resource_name="yyy")
    agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
    ```
    """

    def __init__(
        self,
        deployment_id,
        api_key=None,
        resource_name=None,
        api_version="2022-12-01",
        is_chat_model=None,
        chat_prompt_template=None,
        run_prompt_template=None,
        additional_tools=None,
    ):
        if not is_openai_available():
            raise ImportError(
                "Using `AzureOpenAI` requires `openai`: `pip install openai`."
            )

        self.deployment_id = deployment_id
        openai.api_type = "azure"
        if api_key is None:
            api_key = os.environ.get("AZURE_OPENAI_API_KEY", None)
        if api_key is None:
            raise ValueError(
                "You need an Azure openAI key to use `AzureOpenAI`. If you have"
                " one, set it in your env with `os.environ['AZURE_OPENAI_API_KEY'] ="
                " xxx`."
            )
        else:
            openai.api_key = api_key
        if resource_name is None:
            resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None)
        if resource_name is None:
            raise ValueError(
                "You need a resource_name to use `AzureOpenAI`. If you have one,"
                " set it in your env with `os.environ['AZURE_OPENAI_RESOURCE_NAME'] ="
                " xxx`."
            )
        else:
            openai.api_base = f"https://{resource_name}.openai.azure.com"
        openai.api_version = api_version

        if is_chat_model is None:
            is_chat_model = "gpt" in deployment_id.lower()
        self.is_chat_model = is_chat_model

        super().__init__(
            chat_prompt_template=chat_prompt_template,
            run_prompt_template=run_prompt_template,
            additional_tools=additional_tools,
        )

    def generate_many(self, prompts, stop):
        if self.is_chat_model:
            return [self._chat_generate(prompt, stop) for prompt in prompts]
        else:
            return self._completion_generate(prompts, stop)

    def generate_one(self, prompt, stop):
        if self.is_chat_model:
            return self._chat_generate(prompt, stop)
        else:
            return self._completion_generate([prompt], stop)[0]

    def _chat_generate(self, prompt, stop):
        result = openai.ChatCompletion.create(
            engine=self.deployment_id,
            messages=[{"role": "user", "content": prompt}],
            temperature=0,
            stop=stop,
        )
        return result["choices"][0]["message"]["content"]

    def _completion_generate(self, prompts, stop):
        result = openai.Completion.create(
            engine=self.deployment_id,
            prompt=prompts,
            temperature=0,
            stop=stop,
            max_tokens=200,
        )
        return [answer["text"] for answer in result["choices"]]
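# A small sketch showing how clean_code_for_chat splits a chat reply into its
# explanation and fenced code block; the reply string is a made-up example of
# the format the function expects.
reply = (
    "I will generate an image for you.\n"
    "```py\n"
    'image = image_generator(prompt="rivers and lakes")\n'
    "```"
)
explanation, code = clean_code_for_chat(reply)
assert explanation == "I will generate an image for you."
assert code == 'image = image_generator(prompt="rivers and lakes")'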
@@ -1,110 +0,0 @@
import os
import logging
from dataclasses import dataclass
from swarms.models.dalle3 import Dalle
from swarms.models import OpenAIChat


@dataclass
class Idea2Image:
    """
    A class used to generate images from text prompts using DALLE-3.

    ...

    Attributes
    ----------
    image : str
        Text prompt for the image to generate
    openai_api_key : str
        OpenAI API key
    cookie : str
        Cookie value for DALLE-3
    output_folder : str
        Folder to save the generated images

    Methods
    -------
    llm_prompt():
        Returns a prompt for refining the image generation
    run():
        Generates and downloads the image based on the prompt


    Usage:
    ------
    from dalle3 import Idea2Image

    idea2image = Idea2Image(
        image="Fish hivemind swarm in light blue avatar anime in zen garden pond concept art anime art, happy fish, anime scenery"
    )
    idea2image.run()
    """

    image: str
    openai_api_key: str = os.getenv("OPENAI_API_KEY") or None
    cookie: str = os.getenv("BING_COOKIE") or None
    output_folder: str = "images/"

    def __post_init__(self):
        self.llm = OpenAIChat(openai_api_key=self.openai_api_key)
        self.dalle = Dalle(self.cookie)

    def llm_prompt(self):
        LLM_PROMPT = f"""
        Refine the USER prompt to create a more precise image tailored to the user's needs using
        an image generator like DALLE-3.

        ###### FOLLOW THE GUIDE BELOW TO REFINE THE PROMPT ######

        - Use natural language prompts up to 400 characters to describe the image you want to generate. Be as specific or vague as needed.

        - Frame your photographic prompts like camera position, lighting, film type, year, usage context. This implicitly suggests image qualities.

        - For illustrations, you can borrow photographic terms like "close up" and prompt for media, style, artist, animation style, etc.

        - Prompt hack: name a film/TV show genre + year to "steal the look" for costumes, lighting, etc. without knowing technical details.

        - Try variations of a prompt, make edits, and do recursive uncropping to create interesting journeys and zoom-out effects.

        - Use an image editor like Photopea to uncrop DALL-E outputs and prompt again to extend the image.

        - Combine separate DALL-E outputs into panoramas and murals with careful positioning/editing.

        - Browse communities like Reddit r/dalle2 to get inspired and share your creations. See tools, free image resources, articles.

        - Focus prompts on size, structure, shape, mood, aesthetics to influence the overall vibe and composition.

        - Be more vague or detailed as needed - DALL-E has studied over 400M images and can riff creatively or replicate specific styles.

        - Be descriptive, describe the art style at the end like fusing concept art with anime art or game art or product design art.

        ###### END OF GUIDE ######

        Prompt to refine: {self.image}
        """
        return LLM_PROMPT

    def run(self):
        """
        Generates and downloads the image based on the prompt.

        This method refines the prompt using the llm, opens the website with the query,
        gets the image URLs, and downloads the images to the specified folder.
        """
        # Set up logging
        logging.basicConfig(level=logging.INFO)

        # Refine the prompt using the llm
        image = self.llm_prompt()
        refined_prompt = self.llm(image)
        print(f"Refined prompt: {refined_prompt}")

        # Open the website with your query
        self.dalle.create(refined_prompt)

        # Get the image URLs
        urls = self.dalle.get_urls()

        # Download the images to your specified folder
        self.dalle.download(urls, self.output_folder)
@@ -1,158 +0,0 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferWindowMemory


class MetaPrompterAgent:
    """
    Meta Prompting Agent
    The Meta Prompting Agent has one purpose: to create better prompts for an agent.

    The meta prompting agent would be used in this flow:
    user task -> MetaPrompterAgent -> Agent

    Args:
        llm (BaseLanguageModel): Language Model
        max_iters (int, optional): Maximum number of iterations. Defaults to 3.
        max_meta_iters (int, optional): Maximum number of meta iterations. Defaults to 5.
        failed_phrase (str, optional): Phrase to indicate failure. Defaults to "task failed".
        success_phrase (str, optional): Phrase to indicate success. Defaults to "task succeeded".
        instructions (str, optional): Instructions to be used in the meta prompt. Defaults to "None".
        template (str, optional): Template to be used in the meta prompt. Defaults to None.
        memory (ConversationBufferWindowMemory, optional): Memory to be used in the meta prompt. Defaults to None.
        meta_template (str, optional): Template to be used in the meta prompt. Defaults to None.
        human_input (bool, optional): Whether to use human input. Defaults to False.

    Returns:
        str: Response from the agent

    Usage:
    --------------
    from swarms.workers import Worker
    from swarms.agents.meta_prompter import MetaPrompterAgent
    from langchain.llms import OpenAI

    # init llm
    llm = OpenAI()

    # init the meta prompter agent that optimizes prompts
    meta_optimizer = MetaPrompterAgent(llm=llm)

    # init the worker agent
    worker = Worker(llm)

    # broad task to complete
    task = "Create a feedforward in pytorch"

    # optimize the prompt
    optimized_prompt = meta_optimizer.run(task)

    # run the optimized prompt with detailed instructions
    result = worker.run(optimized_prompt)

    print(result)
    """

    def __init__(
        self,
        llm,
        max_iters: int = 3,
        max_meta_iters: int = 5,
        failed_phrase: str = "task failed",
        success_phrase: str = "task succeeded",
        instructions: str = "None",
        template: str = None,
        memory=None,
        meta_template: str = None,
        human_input: bool = False,
    ):
        self.llm = llm
        self.max_iters = max_iters
        self.max_meta_iters = max_meta_iters
        self.failed_phrase = failed_phrase
        self.success_phrase = success_phrase
        self.instructions = instructions
        self.template = template
        self.memory = memory
        self.meta_template = meta_template
        self.human_input = human_input

        if memory is None:
            memory = ConversationBufferWindowMemory()
            memory.ai_prefix = "Assistant:"

        template = f"""
        Instructions: {self.instructions}
        {{{memory.memory_key}}}
        Human: {{human_input}}
        Assistant:
        """

        prompt = PromptTemplate(
            input_variables=["history", "human_input"], template=template
        )

        self.chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
            verbose=True,
            memory=memory,
        )

    def get_chat_history(self, chain_memory):
        """Get Chat History from the memory"""
        memory_key = chain_memory.memory_key
        chat_history = chain_memory.load_memory_variables(memory_key)[memory_key]
        return chat_history

    def get_new_instructions(self, meta_output):
        """Get New Instructions from the meta_output"""
        delimiter = "Instructions: "
        new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
        return new_instructions

    def run(self, task: str):
        """
        Run the MetaPrompterAgent

        Args:
            task (str): The task to be completed

        Returns:
            str: The response from the agent
        """
        key_phrases = [self.success_phrase, self.failed_phrase]

        for i in range(self.max_meta_iters):
            print(f"[Episode: {i+1}/{self.max_meta_iters}]")

            chain = self.chain
            human_input = ""

            output = chain.predict(human_input=task)

            for j in range(self.max_iters):
                print(f"(Step {j+1}/{self.max_iters})")
                print(f"Assistant: {output}")
                print("Human: ")

                if self.human_input:
                    human_input = input()

                    if any(phrase in human_input.lower() for phrase in key_phrases):
                        break

                    output = chain.predict(human_input=human_input.lower())

            if self.success_phrase in human_input.lower():
                print("You succeeded! Thanks for using!")
                return

            # initialize_meta_chain is expected to be provided elsewhere;
            # it is not defined in this file.
            meta_chain = self.initialize_meta_chain()
            meta_output = meta_chain.predict(
                chat_history=self.get_chat_history(chain.memory)
            )
            print(f"Feedback: {meta_output}")

            self.instructions = self.get_new_instructions(meta_output)
            print(f"New Instructions: {self.instructions}")
            print("\n" + "#" * 80 + "\n")
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
"""The Replicator"""


class Replicator:
    def __init__(
        self,
        model_name,
    ):
        pass

    def run(self, task):
        pass
@@ -1,9 +0,0 @@
class PromptRefiner:
    def __init__(self, system_prompt: str, llm):
        super().__init__()
        self.system_prompt = system_prompt
        self.llm = llm

    def run(self, task: str):
        refine = self.llm(f"System Prompt: {self.system_prompt} Current task: {task}")
        return refine
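# A minimal usage sketch for PromptRefiner: it only needs a callable llm, so a
# stub stands in here for a real model such as OpenAIChat.
def echo_llm(prompt: str) -> str:
    # Stand-in "model" that simply returns the prompt it receives.
    return prompt

refiner = PromptRefiner(system_prompt="Rewrite tasks as precise prompts.", llm=echo_llm)
print(refiner.run("Summarize the latest swarm run"))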
@@ -1,37 +0,0 @@
from termcolor import colored


class SimpleAgent:
    """
    Simple Agent is a simple agent that runs a flow.

    Args:
        name (str): Name of the agent
        flow (Flow): Flow to run

    Example:
    >>> from swarms.agents.simple_agent import SimpleAgent
    >>> from swarms.structs import Flow
    >>> from swarms.models import OpenAIChat
    >>> api_key = ""
    >>> llm = OpenAIChat()

    """

    def __init__(
        self,
        name: str,
        flow,
    ):
        self.name = name
        self.flow = flow
        self.message_history = []

    def run(self, task: str) -> str:
        """Run the agent's flow on a task and record the response."""
        print(colored(f"Agent {self.name} is running task: {task}", "red"))

        response = self.flow.run(task)
        self.message_history.append((self.name, response))
        return response
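# A sketch completing the docstring example above; the Flow constructor
# arguments are assumptions about the swarms API, and the API key is a
# placeholder.
from swarms.models import OpenAIChat
from swarms.structs import Flow

llm = OpenAIChat(openai_api_key="")  # placeholder key
flow = Flow(llm=llm, max_loops=1)
agent = SimpleAgent(name="Helper", flow=flow)
print(agent.run("Draft a short status update for the team"))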