parent d037593a40
commit 20a994b62f
@@ -1,136 +0,0 @@
from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP
from swarms.prompts.tests import TEST_WRITER_SOP_PROMPT
from swarms.structs.agent import Agent


class UnitTesterAgent:
    """
    This class represents a unit testing agent responsible for generating unit tests for the swarms package.

    Attributes:
    - llm: The language model used by the agent.
    - agent_name (str): The name of the agent.
    - agent_description (str): The description of the agent.
    - max_loops (int): The maximum number of loops the agent can run.
    - sop (str): The standard operating procedure prompt used by the agent.
    - agent: The underlying Agent object used for running tasks.

    Methods:
    - run(task: str, module: str, path: str, *args, **kwargs) -> str: Run the agent with the given task and return the response.
    """

    def __init__(
        self,
        llm,
        agent_name: str = "Unit Testing Agent",
        agent_description: str = "This agent is responsible for generating unit tests for the swarms package.",
        max_loops: int = 1,
        sop: str = None,
        module: str = None,
        path: str = None,
        autosave: bool = True,
        *args,
        **kwargs,
    ):
        super().__init__()
        self.llm = llm
        self.agent_name = agent_name
        self.agent_description = agent_description
        self.max_loops = max_loops
        self.sop = sop
        self.module = module
        self.path = path
        self.autosave = autosave

        self.agent = Agent(
            llm=llm,
            agent_name=agent_name,
            agent_description=agent_description,
            autosave=self.autosave,
            system_prompt=agent_description,
            max_loops=max_loops,
            *args,
            **kwargs,
        )

    def run(self, task: str, module: str, path: str, *args, **kwargs):
        """
        Run the agent with the given task.

        Args:
        - task (str): The task to run the agent with.
        - module (str), path (str): Accepted for interface compatibility; the values stored at construction time (self.module, self.path) are what is passed to the prompt.

        Returns:
        - str: The response from the agent.
        """
        return self.agent.run(
            TEST_WRITER_SOP_PROMPT(task, self.module, self.path),
            *args,
            **kwargs,
        )


class DocumentorAgent:
    """
    This class represents a documentor agent responsible for generating documentation for the swarms package.

    Attributes:
    - llm: The language model used by the agent.
    - agent_name (str): The name of the agent.
    - agent_description (str): The description of the agent.
    - max_loops (int): The maximum number of loops the agent can run.
    - sop (str): The standard operating procedure prompt used by the agent.
    - agent: The underlying Agent object used for running tasks.

    Methods:
    - run(task: str, module: str, path: str, *args, **kwargs) -> str: Run the agent with the given task and return the response.
    """

    def __init__(
        self,
        llm,
        agent_name: str = "Documentor Agent",
        agent_description: str = "This agent is responsible for generating documentation for the swarms package.",
        max_loops: int = 1,
        sop: str = None,
        module: str = None,
        path: str = None,
        autosave: bool = True,
        *args,
        **kwargs,
    ):
        super().__init__()
        self.llm = llm
        self.agent_name = agent_name
        self.agent_description = agent_description
        self.max_loops = max_loops
        self.sop = sop
        self.module = module
        self.path = path
        self.autosave = autosave

        self.agent = Agent(
            llm=llm,
            agent_name=agent_name,
            agent_description=agent_description,
            autosave=self.autosave,
            system_prompt=agent_description,
            max_loops=max_loops,
            *args,
            **kwargs,
        )

    def run(self, task: str, module: str, path: str, *args, **kwargs):
        """
        Run the agent with the given task.

        Args:
        - task (str): The task to run the agent with.
        - module (str), path (str): Accepted for interface compatibility; the module stored at construction time (self.module) is what is passed to the prompt.

        Returns:
        - str: The response from the agent.
        """
        return self.agent.run(
            DOCUMENTATION_WRITER_SOP(task, self.module),
            *args,
            **kwargs,
        )
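A minimal usage sketch of the two agents above, for orientation only: OpenAIChat is borrowed from the usage example in the OmniModalAgent docstring further down, and the module/path values are illustrative assumptions rather than anything from the deleted file.

# Hypothetical wiring of UnitTesterAgent and DocumentorAgent (defined above).
from swarms import OpenAIChat  # assumed available, as in the OmniModalAgent docstring

llm = OpenAIChat()

tester = UnitTesterAgent(
    llm=llm,
    module="swarms.structs.agent",   # example module whose tests should be written
    path="swarms/structs/agent.py",  # example path forwarded to TEST_WRITER_SOP_PROMPT
)
# run() ignores its module/path parameters and uses the values stored above.
print(tester.run("Write unit tests for the Agent class", module=None, path=None))

documentor = DocumentorAgent(llm=llm, module="swarms.structs.agent")
print(documentor.run("Document the Agent class", module=None, path=None))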
@@ -1,100 +0,0 @@
from langchain.base_language import BaseLanguageModel
from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import (
    load_response_generator,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_executor import (
    TaskExecutor,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_planner import (
    load_chat_planner,
)
from transformers import load_tool

from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger


class OmniModalAgent(Agent):
    """
    OmniModalAgent
    LLM -> Plans -> Tasks -> Tools -> Response

    Architecture:
    1. LLM: Language Model
    2. Chat Planner: Plans
    3. Task Executor: Tasks
    4. Tools: Tools

    Args:
        llm (BaseLanguageModel): Language Model
        tools (List[BaseTool]): List of tools

    Returns:
        str: response

    Usage:
        from swarms import OmniModalAgent, OpenAIChat

        llm = OpenAIChat()
        agent = OmniModalAgent(llm)
        response = agent.run("Hello, how are you? Create an image of how you are doing!")
    """

    def __init__(
        self,
        llm: BaseLanguageModel,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__(llm=llm, *args, **kwargs)
        self.llm = llm
        self.verbose = verbose

        print("Loading tools...")
        self.tools = [
            load_tool(tool_name)
            for tool_name in [
                "document-question-answering",
                "image-captioning",
                "image-question-answering",
                "image-segmentation",
                "speech-to-text",
                "summarization",
                "text-classification",
                "text-question-answering",
                "translation",
                "huggingface-tools/text-to-image",
                "huggingface-tools/text-to-video",
                "text-to-speech",
                "huggingface-tools/text-download",
                "huggingface-tools/image-transformation",
            ]
        ]

        # Load the chat planner and response generator
        self.chat_planner = load_chat_planner(llm)
        self.response_generator = load_response_generator(llm)
        self.task_executor = TaskExecutor
        self.history = []

    def run(self, task: str) -> str:
        """Run the OmniAgent"""
        try:
            plan = self.chat_planner.plan(
                inputs={
                    "input": task,
                    "hf_tools": self.tools,
                }
            )
            self.task_executor = TaskExecutor(plan)
            self.task_executor.run()

            response = self.response_generator.generate(
                {"task_execution": self.task_executor}
            )

            return response
        except Exception as error:
            logger.error(f"Error running the agent: {error}")
            return f"Error running the agent: {error}"
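A usage sketch mirroring the Usage section of the docstring above; it assumes credentials for OpenAIChat are configured and that every tool name in the constructor can actually be fetched with transformers.load_tool, which is what makes construction slow.

# Mirrors the docstring's Usage example; construction downloads the HuggingFace
# tools, then run() goes plan -> execute -> generate, as implemented above.
from swarms import OmniModalAgent, OpenAIChat

llm = OpenAIChat()
agent = OmniModalAgent(llm)
response = agent.run("Summarize this request and create an image of how you are doing!")
print(response)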
@@ -1,105 +0,0 @@
import importlib
import pkgutil
from typing import Any

import swarms.models
from swarms.models.base_llm import BaseLLM
from swarms.structs.conversation import Conversation


def get_llm_by_name(name: str):
    """
    Searches all the modules exported from the 'swarms.models' path for a class with the given name.

    Args:
        name (str): The name of the class to search for.

    Returns:
        type: The class with the given name, or None if no such class is found.
    """
    for importer, modname, ispkg in pkgutil.iter_modules(
        swarms.models.__path__
    ):
        module = importlib.import_module(f"swarms.models.{modname}")
        if hasattr(module, name):
            return getattr(module, name)
    return None


# Run the language model in a loop for n iterations
def SimpleAgent(
    llm: BaseLLM = None, iters: Any = "automatic", *args, **kwargs
):
    """
    A simple agent that interacts with a language model.

    Args:
        llm (BaseLLM): The language model to use for generating responses, or the name of a model class to look up in swarms.models.
        iters (Any): The number of iterations, or "automatic" to run until the user types "quit".
        *args: Additional positional arguments to pass to the language model.
        **kwargs: Additional keyword arguments to pass to the language model.

    Raises:
        Exception: If the language model is not defined or cannot be found.

    Returns:
        None
    """
    try:
        if llm is None:
            raise Exception("Language model not defined")

        if isinstance(llm, str):
            llm_name = llm
            llm = get_llm_by_name(llm_name)
            if llm is None:
                raise Exception(f"Language model {llm_name} not found")
            llm = llm(*args, **kwargs)
    except Exception as error:
        print(f"[ERROR][SimpleAgent] {error}")
        raise error

    try:
        conv = Conversation(*args, **kwargs)
        if iters == "automatic":
            i = 0
            while True:
                user_input = input("\033[91mUser:\033[0m ")
                conv.add("user", user_input)
                if user_input.lower() == "quit":
                    break
                task = (
                    conv.return_history_as_string()
                )  # Get the conversation history
                out = llm(task, *args, **kwargs)
                conv.add("assistant", out)
                print(
                    f"\033[94mAssistant:\033[0m {out}",
                )
                conv.display_conversation()
                conv.export_conversation("conversation.txt")
                i += 1
        else:
            for i in range(iters):
                user_input = input("\033[91mUser:\033[0m ")
                conv.add("user", user_input)
                if user_input.lower() == "quit":
                    break
                task = (
                    conv.return_history_as_string()
                )  # Get the conversation history
                out = llm(task, *args, **kwargs)
                conv.add("assistant", out)
                print(
                    f"\033[94mAssistant:\033[0m {out}",
                )
                conv.display_conversation()
                conv.export_conversation("conversation.txt")

    except Exception as error:
        print(f"[ERROR][SimpleAgentConversation] {error}")
        raise error

    except KeyboardInterrupt:
        print("[INFO][SimpleAgentConversation] Keyboard interrupt")
        conv.export_conversation("conversation.txt")
        raise KeyboardInterrupt
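A short sketch of how the two helpers above fit together; "OpenAIChat" is only an assumed example of a class exported somewhere under swarms.models, and the interactive turns expect keyboard input.

# Illustrative only: "OpenAIChat" is assumed to be discoverable under swarms.models.
llm_cls = get_llm_by_name("OpenAIChat")
if llm_cls is not None:
    SimpleAgent(llm_cls(), iters=2)  # two interactive turns; type "quit" to stop early

# Passing the class name as a string lets SimpleAgent resolve it itself and
# loop until the user types "quit" or hits Ctrl+C.
SimpleAgent("OpenAIChat", iters="automatic")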