Former-commit-id: e0f43399d5
discord-bot-framework
Kye 1 year ago
parent 4221bcbe6b
commit 1a46dabc78

@ -44,7 +44,7 @@ nodes = [
messages = [ messages = [
{ {
"role": "system", "role": "system",
"context": f"Create an a small feedforward in pytorch", "context": "Create an a small feedforward in pytorch",
} }
] ]

@ -12,4 +12,4 @@ from swarms.agents.omni_modal_agent import OmniModalAgent
#utils #utils
from swarms.agents.message import Message from swarms.agents.message import Message
from swarms.agents.stream_response import stream from swarms.agents.stream_response import stream
# from swarms.agents.base import AbstractAgent from swarms.agents.base import Agent

@ -1,133 +0,0 @@
from __future__ import annotations
from typing import List, Optional
from langchain.chains.llm import LLMChain
from swarms.agents.utils.Agent import AgentOutputParser
from swarms.agents.utils.human_input import HumanInputRun
from swarms.memory.base_memory import BaseChatMessageHistory, ChatMessageHistory
from swarms.memory.document import Document
from swarms.models.base import AbstractModel
from swarms.models.prompts.agent_prompt_auto import (
MessageFormatter,
PromptConstructor,
)
from swarms.models.prompts.agent_prompt_generator import FINISH_NAME
from swarms.models.prompts.base import (
AIMessage,
HumanMessage,
SystemMessage,
)
from swarms.tools.base import BaseTool
class Agent:
    """Autonomous agent that repeatedly queries an LLM chain, parses the
    reply into a tool command, executes the tool, and records the result
    in memory until the model emits the FINISH command.
    """

    def __init__(
        self,
        ai_name: str,
        chain: LLMChain,
        memory,
        output_parser: AgentOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
        chat_history_memory: Optional[BaseChatMessageHistory] = None,
    ):
        """
        Args:
            ai_name: display name of the agent.
            chain: LLM chain used to produce each assistant reply.
            memory: document-style memory supporting ``add_documents``.
            output_parser: parses a raw LLM reply into a tool action.
            tools: tools the agent may invoke by ``name``.
            feedback_tool: optional human-in-the-loop input tool.
            chat_history_memory: chat message history; a fresh
                ``ChatMessageHistory`` is created when omitted/falsy.
        """
        self.ai_name = ai_name
        self.chain = chain
        self.memory = memory
        self.next_action_count = 0
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool
        self.chat_history_memory = chat_history_memory or ChatMessageHistory()

    @classmethod
    def integrate(
        cls,
        ai_name: str,
        ai_role: str,
        memory,
        tools: List[BaseTool],
        llm: AbstractModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[AgentOutputParser] = None,
        chat_history_memory: Optional[BaseChatMessageHistory] = None,
    ) -> Agent:
        """Build a fully wired ``Agent`` from its collaborators.

        Constructs the prompt machinery and LLM chain, then returns an
        ``Agent`` instance (alternate constructor).
        """
        prompt_constructor = PromptConstructor(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
        )
        message_formatter = MessageFormatter()
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(
            llm=llm,
            prompt_constructor=prompt_constructor,
            message_formatter=message_formatter,
        )
        # BUG FIX: the original passed ``memory`` and ``chain`` positionally
        # in swapped order relative to ``__init__`` (chain ended up in
        # ``self.memory`` and vice versa). Keyword arguments make the wiring
        # explicit and correct.
        return cls(
            ai_name,
            chain=chain,
            memory=memory,
            output_parser=output_parser or AgentOutputParser(),
            tools=tools,
            feedback_tool=human_feedback_tool,
            chat_history_memory=chat_history_memory,
        )

    def run(self, goals: List[str]) -> str:
        """Drive the think/act loop until the model issues FINISH.

        Args:
            goals: high-level goals injected into every prompt.

        Returns:
            The final response text — the FINISH command's ``response``
            argument, or ``"EXITING"`` when a human reviewer aborts.
        """
        user_input = (
            "Determine which next command to use, and respond using the format specified above:"
        )
        loop_count = 0
        while True:
            loop_count += 1
            # Ask the LLM for the next command given goals, chat history,
            # and long-term memory.
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.chat_history_memory.messages,
                memory=self.memory,
                user_input=user_input,
            )
            print(assistant_reply)
            self.chat_history_memory.add_message(HumanMessage(content=user_input))
            self.chat_history_memory.add_message(AIMessage(content=assistant_reply))

            # Parse the reply into a command name + arguments.
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}
            if action.name == FINISH_NAME:
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    observation = tool.run(action.args)
                except Exception as error:
                    # BUG FIX: the original had two ``except Exception``
                    # clauses for this try; the second was unreachable dead
                    # code and has been removed. The reachable (first)
                    # handler's message is preserved unchanged.
                    observation = (
                        f"Validation Error in args: {str(error)}, args: {action.args}"
                    )
                result = f"Command {tool.name} returned: {observation}"
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
            else:
                result = (
                    f"""Unknown command '{action.name}'.
                    Please refer to the 'COMMANDS' list for available
                    commands and only respond in the specified JSON format."""
                )
            memory_to_add = (
                f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
            )
            if self.feedback_tool is not None:
                feedback = f"\n{self.feedback_tool.run('Input: ')}"
                # NOTE(review): ``feedback`` always starts with "\n", so this
                # membership test can never match "q"/"stop" — the exit path
                # looks dead. Behavior kept as-is; confirm intent before
                # stripping the prefix.
                if feedback in {"q", "stop"}:
                    print("EXITING")
                    return "EXITING"
                memory_to_add += feedback

            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.chat_history_memory.add_message(SystemMessage(content=result))

@ -1,28 +1,65 @@
from typing import Dict, List, Optional, Union
class Agent:
"""(In preview) An abstract class for AI agent.
An agent can communicate with other agents and perform actions.
Different agents can differ in what actions they perform in the `receive` method.
Agents are full and completed:
Agents = llm + tools + memory
"""
class AbstractAgent:
def __init__( def __init__(
self, self,
llm, name: str,
temperature #tools: List[Tool],
) -> None: #memory: Memory
pass ):
"""
Args:
name (str): name of the agent.
"""
# a dictionary of conversations, default value is list
self._name = name
@property
def name(self):
"""Get the name of the agent."""
return self._name
def tools(self, tools):
"""init tools"""
#single query def memory(self, memory_store):
def run(self, task: str): """init memory"""
pass pass
# conversational back and forth def reset(self):
def chat(self, message: str): """(Abstract method) Reset the agent."""
message_historys = []
message_historys.append(message)
reply = self.run(message) def run(self, task: str):
message_historys.append(reply) """Run the agent once"""
return message_historys def _arun(self, taks: str):
"""Run Async run"""
def chat(self, messages: List[Dict]):
"""Chat with the agent"""
def step(self, message): def _achat(
pass self,
messages: List[Dict]
def reset(self): ):
pass """Asynchronous Chat"""
def step(self, message: str):
"""Step through the agent"""
def _astep(self, message: str):
"""Asynchronous step"""

File diff suppressed because it is too large Load Diff

@ -85,9 +85,9 @@ class GroupChatManager(Worker):
): ):
super().__init__( super().__init__(
ai_name=ai_name, ai_name=ai_name,
max_consecutive_auto_reply=max_consecutive_auto_reply, # max_consecutive_auto_reply=max_consecutive_auto_reply,
human_input_mode=human_input_mode, # human_input_mode=human_input_mode,
system_message=system_message, # system_message=system_message,
**kwargs **kwargs
) )
self.register_reply( self.register_reply(

@ -1 +1,2 @@
from swarms.workers.worker import Worker from swarms.workers.worker import Worker
from swarms.workers.base import AbstractWorker

@ -0,0 +1,96 @@
from typing import Dict, List, Optional, Union
class AbstractWorker:
    """(In preview) Abstract base for an AI worker.

    Workers exchange messages with one another and act on them; concrete
    subclasses decide what happens inside ``receive``.
    """

    def __init__(self, name: str):
        """
        Args:
            name (str): name of the worker.
        """
        # a dictionary of conversations, default value is list
        self._name = name

    @property
    def name(self):
        """Name of this worker."""
        return self._name

    def run(self, task: str):
        """Execute the worker agent once on *task*."""

    def send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
        """(Abstract method) Deliver *message* to another worker."""

    async def a_send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
        """(Abstract async method) Deliver *message* to another worker."""

    def receive(self, message: Union[Dict, str], sender: "Agent", request_reply: Optional[bool] = None):
        """(Abstract method) Handle a *message* arriving from another worker."""

    async def a_receive(self, message: Union[Dict, str], sender: "Agent", request_reply: Optional[bool] = None):
        """(Abstract async method) Handle a *message* arriving from another worker."""

    def reset(self):
        """(Abstract method) Return the worker to its initial state."""

    def generate_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional["Agent"] = None,
        **kwargs,
    ) -> Union[str, Dict, None]:
        """(Abstract method) Produce a reply to the received messages.

        Args:
            messages (list[dict]): a list of messages received.
            sender: sender of an Agent instance.

        Returns:
            str or dict or None: the generated reply; None means no reply.
        """

    async def a_generate_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional["Agent"] = None,
        **kwargs,
    ) -> Union[str, Dict, None]:
        """(Abstract async method) Produce a reply to the received messages.

        Args:
            messages (list[dict]): a list of messages received.
            sender: sender of an Agent instance.

        Returns:
            str or dict or None: the generated reply; None means no reply.
        """

@ -5,7 +5,7 @@ from langchain.embeddings import OpenAIEmbeddings
from langchain.tools.human.tool import HumanInputRun from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores import FAISS from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT from langchain_experimental.autonomous_agents import AutoGPT
from typing import Dict, List, Optional, Union
from swarms.agents.message import Message from swarms.agents.message import Message
from swarms.tools.autogpt import ( from swarms.tools.autogpt import (
ReadFileTool, ReadFileTool,
@ -302,4 +302,13 @@ class Worker:
""" """
for token in response.split(): for token in response.split():
yield token yield token
@staticmethod
def _message_to_dict(message: Union[Dict, str]):
"""Convert a message"""
if isinstance(message, str):
return {"content": message}
else:
return message
Loading…
Cancel
Save