parent 77ecece6e7
commit 090a47fa68
@@ -0,0 +1,22 @@
import os
from dotenv import load_dotenv
from swarms.agents.worker_agent import Worker
from swarms import OpenAIChat

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

worker = Worker(
    name="My Worker",
    role="Worker",
    human_in_the_loop=False,
    tools=[],
    temperature=0.5,
    llm=OpenAIChat(openai_api_key=api_key),
)

out = worker.run(
    "Hello, how are you? Create an image of how you are doing!"
)
print(out)
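
The script relies on `OPENAI_API_KEY` being loaded from a local `.env` file by `load_dotenv()`. A minimal pre-flight check, kept as a sketch (the error message is illustrative, not part of the commit):

```python
import os

from dotenv import load_dotenv

load_dotenv()  # reads a .env file containing a line such as: OPENAI_API_KEY=sk-...

if not os.getenv("OPENAI_API_KEY"):
    # Without the key, both OpenAIChat and the Worker's memory setup will fail.
    raise SystemExit("OPENAI_API_KEY is not set; the Worker example cannot run.")
```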
@@ -0,0 +1,199 @@
import os
from typing import Any, List

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT

from swarms.utils.decorators import error_decorator, timing_decorator


class Worker:
    """
    The Worker class represents an autonomous agent that can perform tasks through
    function calls or by running a chat.

    Args:
        name (str, optional): Name of the agent. Defaults to "Autobot Swarm Worker".
        role (str, optional): Role of the agent. Defaults to "Worker in a swarm".
        external_tools (list, optional): List of external tools. Defaults to None.
        human_in_the_loop (bool, optional): Whether to include a human in the loop. Defaults to False.
        temperature (float, optional): Temperature for the agent. Defaults to 0.5.
        llm ([type], optional): Language model. Defaults to None.
        openai_api_key (str, optional): OpenAI API key. Defaults to None.

    Raises:
        RuntimeError: If there is an error while setting up the agent.

    Example:
        >>> worker = Worker(
        ...     name="My Worker",
        ...     role="Worker",
        ...     external_tools=[MyTool1(), MyTool2()],
        ...     human_in_the_loop=False,
        ...     temperature=0.5,
        ... )
        >>> worker.run("What's the weather in Miami?")

    """

    def __init__(
        self,
        name: str = "WorkerAgent",
        role: str = "Worker in a swarm",
        external_tools=None,
        human_in_the_loop: bool = False,
        temperature: float = 0.5,
        llm=None,
        openai_api_key: str = None,
        tools: List[Any] = None,
        embedding_size: int = 1536,
        search_kwargs: dict = {"k": 8},
        *args,
        **kwargs,
    ):
        self.name = name
        self.role = role
        self.external_tools = external_tools
        self.human_in_the_loop = human_in_the_loop
        self.temperature = temperature
        self.llm = llm
        self.openai_api_key = openai_api_key
        self.tools = tools
        self.embedding_size = embedding_size
        self.search_kwargs = search_kwargs

        self.setup_tools(external_tools)
        self.setup_memory()
        self.setup_agent()

    def reset(self):
        """
        Reset the message history.
        """
        self.message_history = []

    def receive(self, name: str, message: str) -> None:
        """
        Receive a message and update the message history.

        Parameters:
        - `name` (str): The name of the sender.
        - `message` (str): The received message.
        """
        self.message_history.append(f"{name}: {message}")

    def send(self) -> str:
        """Send the accumulated message history to the agent."""
        return self.agent.run(task=self.message_history)

    def setup_tools(self, external_tools):
        """
        Set up tools for the worker.

        Parameters:
        - `external_tools` (list): List of external tools (optional).

        Example:
        ```
        external_tools = [MyTool1(), MyTool2()]
        worker = Worker(model_name="gpt-4",
                        openai_api_key="my_key",
                        name="My Worker",
                        role="Worker",
                        external_tools=external_tools,
                        human_in_the_loop=False,
                        temperature=0.5)
        ```
        """
        if self.tools is None:
            self.tools = []

        if external_tools is not None:
            self.tools.extend(external_tools)

    def setup_memory(self):
        """
        Set up memory for the worker.
        """
        openai_api_key = (
            os.getenv("OPENAI_API_KEY") or self.openai_api_key
        )
        try:
            embeddings_model = OpenAIEmbeddings(
                openai_api_key=openai_api_key
            )
            embedding_size = self.embedding_size
            index = faiss.IndexFlatL2(embedding_size)

            self.vectorstore = FAISS(
                embeddings_model.embed_query,
                index,
                InMemoryDocstore({}),
                {},
            )

        except Exception as error:
            raise RuntimeError(
                "Error setting up memory; perhaps try tuning the"
                f" embedding size: {error}"
            )

    def setup_agent(self):
        """
        Set up the autonomous agent.
        """
        try:
            self.agent = AutoGPT.from_llm_and_tools(
                ai_name=self.name,
                ai_role=self.role,
                tools=self.tools,
                llm=self.llm,
                memory=self.vectorstore.as_retriever(
                    search_kwargs=self.search_kwargs
                ),
                human_in_the_loop=self.human_in_the_loop,
            )

        except Exception as error:
            raise RuntimeError(f"Error setting up agent: {error}")

    # @log_decorator
    @error_decorator
    @timing_decorator
    def run(self, task: str = None, *args, **kwargs):
        """
        Run the autonomous agent on a given task.

        Parameters:
        - `task`: The task to be processed.

        Returns:
        - `result`: The result of the agent's processing.
        """
        try:
            result = self.agent.run([task], *args, **kwargs)
            return result
        except Exception as error:
            raise RuntimeError(f"Error while running agent: {error}")

    # @log_decorator
    @error_decorator
    @timing_decorator
    def __call__(self, task: str = None, *args, **kwargs):
        """
        Make the worker callable to run the agent on a given task.

        Parameters:
        - `task`: The task to be processed.

        Returns:
        - `results`: The results of the agent's processing.
        """
        try:
            results = self.run(task, *args, **kwargs)
            return results
        except Exception as error:
            raise RuntimeError(f"Error while running agent: {error}")
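
Besides `run`, the class keeps a simple message history through `reset`, `receive`, and `send`. A minimal sketch of that flow, assuming `worker` was constructed as in the example script above; note that `reset()` must be called once before `receive()`, since `message_history` is only created there. The message contents are placeholders:

```python
worker.reset()  # initializes message_history
worker.receive("user", "Summarize today's tasks.")
worker.receive("scheduler", "Stand-up at 10:00, code review at 14:00.")
worker.send()  # forwards the accumulated history to the underlying AutoGPT agent
```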
@@ -0,0 +1,60 @@
def worker_agent_system(name: str, memory: str = None):
    return """
    You are {name},
    Your decisions must always be made independently without seeking user assistance.
    Play to your strengths as an LLM and pursue simple strategies with no legal complications.
    If you have completed all your tasks, make sure to use the "finish" command.

    GOALS:

    1. Hello, how are you? Create an image of how you are doing!

    Constraints:

    1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
    2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
    3. No user assistance
    4. Exclusively use the commands listed in double quotes e.g. "command name"

    Commands:

    1. finish: use this to signal that you have finished all your objectives, args: "response": "final response to let people know you have finished your objectives"

    Resources:

    1. Internet access for searches and information gathering.
    2. Long Term memory management.
    3. GPT-3.5 powered Agents for delegation of simple tasks.
    4. File output.

    Performance Evaluation:

    1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
    2. Constructively self-criticize your big-picture behavior constantly.
    3. Reflect on past decisions and strategies to refine your approach.
    4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.

    You should only respond in JSON format as described below.
    Response Format:
    {{
        "thoughts": {{
            "text": "thought",
            "reasoning": "reasoning",
            "plan": "- short bulleted\n- list that conveys\n- long-term plan",
            "criticism": "constructive self-criticism",
            "speak": "thoughts summary to say to user"
        }},
        "command": {{
            "name": "command name",
            "args": {{
                "arg name": "value"
            }}
        }}
    }}
    Ensure the response can be parsed by Python json.loads.
    System: The current time and date is Sat Jan 20 10:39:07 2024
    System: This reminds you of these events from your past:
    [{memory}]

    Human: Determine which next command to use, and respond using the format specified above:
    """.format(name=name, memory=memory)
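
To inspect the rendered system prompt, the function can be called directly; the name and memory values below are placeholders:

```python
prompt = worker_agent_system(name="My Worker", memory="No prior events recorded.")
print(prompt)  # the {name} and {memory} slots are filled in by .format()
```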
@@ -0,0 +1,32 @@
from typing import List

from swarms.structs.step import Step


class Plan:
    def __init__(self, steps: List[Step]):
        """
        Initializes a Plan object.

        Args:
            steps (List[Step]): A list of Step objects representing the steps in the plan.
        """
        self.steps = steps

    def __str__(self) -> str:
        """
        Returns a string representation of the Plan object.

        Returns:
            str: A string representation of the Plan object.
        """
        return str([str(step) for step in self.steps])

    def __repr__(self) -> str:
        """
        Returns a string representation of the Plan object.

        Returns:
            str: A string representation of the Plan object.
        """
        return str(self)
@@ -0,0 +1,24 @@
from dataclasses import dataclass
from typing import Dict, List

from swarms.tools.tool import BaseTool


@dataclass
class Step:
    """
    Represents a step in a process.

    Attributes:
        task (str): The task associated with the step.
        id (int): The unique identifier of the step.
        dep (List[int]): The list of step IDs that this step depends on.
        args (Dict[str, str]): The arguments associated with the step.
        tool (BaseTool): The tool used to execute the step.
    """

    task: str
    id: int
    dep: List[int]
    args: Dict[str, str]
    tool: BaseTool
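
A minimal sketch of how the two structs compose; the module path `swarms.structs.plan` and the tool instance are assumptions, since neither the file paths nor a concrete `BaseTool` appear in this diff:

```python
from swarms.structs.plan import Plan  # assumed module path
from swarms.structs.step import Step

search_tool = ...  # placeholder for any concrete BaseTool implementation

steps = [
    Step(task="Find the weather in Miami", id=1, dep=[], args={"query": "Miami weather"}, tool=search_tool),
    Step(task="Summarize the findings", id=2, dep=[1], args={}, tool=search_tool),
]
plan = Plan(steps=steps)
print(plan)  # Plan.__str__ renders the string form of each Step in order
```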