[BUGFIX][Reliable Tool Usage]

pull/440/head
Kye 9 months ago
parent 71f6aaec25
commit 8a656f92d1

@@ -447,46 +447,29 @@ A Plug in and play conversational agent with `GPT4`, `Mixtral`, or any of our models
 - Reliable, this simple system will always provide the responses you want.

 ```python
-import os
-from dotenv import load_dotenv
-from swarms import Conversation, OpenAIChat
+from swarms import Agent, Anthropic

-conv = Conversation(
-    time_enabled=True,
-)
-
-# Load the environment variables
-load_dotenv()
-
-# Get the API key from the environment
-api_key = os.environ.get("OPENAI_API_KEY")
-
-# Initialize the language model
-llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
-
-# Run the language model in a loop
-def interactive_conversation(llm):
-    conv = Conversation()
-    while True:
-        user_input = input("User: ")
-        conv.add("user", user_input)
-        if user_input.lower() == "quit":
-            break
-        task = conv.return_history_as_string()  # Get the conversation history
-        out = llm(task)
-        conv.add("assistant", out)
-        print(f"Assistant: {out}")
-    conv.display_conversation()
-    conv.export_conversation("conversation.txt")
-
-# Replace with your LLM instance
-interactive_conversation(llm)
+## Initialize the workflow
+agent = Agent(
+    agent_name="Transcript Generator",
+    agent_description=(
+        "Generate a transcript for a youtube video on what swarms"
+        " are!"
+    ),
+    llm=Anthropic(),
+    max_loops=3,
+    autosave=True,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    interactive=True,  # Set to True
+)
+
+# Run the workflow on a task
+agent("Generate a transcript for a youtube video on what swarms are!")
 ```
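For context, `interactive=True` switches the agent into a turn-by-turn session that prompts for input between loops (the `You:` prompt appears in the `Agent` class hunk further down). A minimal sketch of that pattern, where `agent_step` is a hypothetical callable mapping one user message to one assistant response:

```python
# Minimal sketch of an interactive loop; `agent_step` is a hypothetical
# callable that maps one user message to one assistant response.
def interactive_session(agent_step):
    while True:
        user_input = input("You: ")
        # User-defined exit command, as in the removed Conversation example.
        if user_input.lower() == "quit":
            break
        print(f"Assistant: {agent_step(user_input)}")
```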
@@ -1029,6 +1012,90 @@ autoswarm.run("Analyze these financial data and give me a summary")
```

## `AgentRearrange`

Inspired by Einops and einsum, this orchestration technique enables you to map out the relationships between various agents. For example, you can specify linear and sequential relationships like `a -> a1 -> a2 -> a3`, or concurrent relationships where the first agent sends a message to three agents at once: `a -> a1, a2, a3`. You can customize your workflow to mix sequential and concurrent relationships.
```python
from swarms import Agent, Anthropic, AgentRearrange,
## Initialize the workflow
agent = Agent(
agent_name="t",
agent_description=(
"Generate a transcript for a youtube video on what swarms"
" are!"
),
system_prompt=(
"Generate a transcript for a youtube video on what swarms"
" are!"
),
llm=Anthropic(),
max_loops=1,
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
)
agent2 = Agent(
agent_name="t1",
agent_description=(
"Generate a transcript for a youtube video on what swarms"
" are!"
),
llm=Anthropic(),
max_loops=1,
system_prompt="Summarize the transcript",
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
)
agent3 = Agent(
agent_name="t2",
agent_description=(
"Generate a transcript for a youtube video on what swarms"
" are!"
),
llm=Anthropic(),
max_loops=1,
system_prompt="Finalize the transcript",
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
)
# Rearrange the agents
rearrange = AgentRearrange(
agents=[agent, agent2, agent3],
verbose=True,
# custom_prompt="Summarize the transcript",
)
# Run the workflow on a task
results = rearrange(
# pattern="t -> t1, t2 -> t2",
pattern="t -> t1 -> t2",
default_task=(
"Generate a transcript for a YouTube video on what swarms"
" are!"
),
t="Generate a transcript for a YouTube video on what swarms are!",
# t2="Summarize the transcript",
# t3="Finalize the transcript",
)
# print(results)
```
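The commented-out `pattern="t -> t1, t2 -> t2"` above hints at the concurrent form described earlier. A hedged sketch of a pure fan-out run reusing the same `rearrange` object (the pattern string is an assumption based on that comment):

```python
# Hypothetical fan-out: agent "t" sends its output to "t1" and "t2" at once.
results = rearrange(
    pattern="t -> t1, t2",
    default_task="Generate a transcript for a YouTube video on what swarms are!",
)
```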
---
## Documentation

@@ -7,11 +7,13 @@ Plan -> act in a loop until observation is met

 - Text Editor
 - Browser
 """

-from swarms import Agent, OpenAIChat, tool
+from swarms import Agent, Anthropic, tool
 import subprocess

 # Model
-llm = OpenAIChat()
+llm = Anthropic(
+    temperature=0.1,
+)

 # Tools
@@ -37,7 +39,7 @@ def terminal(

 @tool
 def browser(query: str):
     """
-    Search the query in the browser.
+    Search the query in the browser with the `browser` tool.

     Args:
         query (str): The query to search in the browser.
@@ -58,10 +60,10 @@ agent = Agent(
     system_prompt=(
         "Autonomous agent that can interact with humans and other"
         " agents. Be Helpful and Kind. Use the tools provided to"
-        " assist the user."
+        " assist the user. Return all code in markdown format."
     ),
     llm=llm,
-    max_loops=4,
+    max_loops="auto",
     autosave=True,
     dashboard=False,
     streaming_on=True,
@@ -73,5 +75,5 @@ agent = Agent(
 )

 # Run the agent
-out = agent("What is the weather today in palo alto?")
+out = agent("What is the weather today in Palo Alto? Use the browser tool to search for the weather.")
 print(out)
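Only the `browser` docstring is visible in the hunks above, but `terminal` follows the same `@tool` shape (its `def terminal(` signature shows in the hunk header). A minimal sketch of that tool under the same pattern; the body is illustrative, not necessarily the repository's implementation:

```python
@tool
def terminal(code: str):
    """
    Run code in the terminal with the `terminal` tool.

    Args:
        code (str): The shell command to run.

    Returns:
        str: The combined stdout and stderr of the command.
    """
    # Illustrative body: run the command and capture its output.
    out = subprocess.run(code, shell=True, capture_output=True, text=True)
    return out.stdout + out.stderr
```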

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "4.7.3"
+version = "4.7.4"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

@@ -138,16 +138,13 @@ class Worker(Agent):
                 ai_role=self.role,
                 tools=self.tools,
                 llm=self.llm,
-                memory=self.vectorstore.as_retriever(
-                    search_kwargs=self.search_kwargs
-                ),
+                # memory = None,
                 human_in_the_loop=self.human_in_the_loop,
             )
         except Exception as error:
             raise RuntimeError(f"Error setting up agent: {error}")

-    # @log_decorator
     @error_decorator
     @timing_decorator
     def run(self, task: str = None, *args, **kwargs):
@@ -166,7 +163,6 @@ class Worker(Agent):
         except Exception as error:
             raise RuntimeError(f"Error while running agent: {error}")

-    # @log_decorator
     @error_decorator
     @timing_decorator
     def __call__(self, task: str = None, *args, **kwargs):

@@ -24,7 +24,8 @@ from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.data_to_text import data_to_text
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.tools.exec_tool import execute_tool_by_name
from swarms.tools.function_util import process_tool_docs
# Utils
# Custom stopping condition
@@ -320,8 +321,17 @@ class Agent:
                 memory=self.short_memory.return_history_as_string(),
             )

-            # Append the tools prompt to the sop
-            self.sop = f"{self.sop}\n{tools_prompt}"
+            # Append the tools prompt to the short-term memory
+            self.short_memory.add(
+                role=self.agent_name, content=tools_prompt
+            )
+
+            # And add each tool's documentation to the memory
+            for tool in self.tools:
+                tool_docs = process_tool_docs(tool)
+                self.short_memory.add(
+                    role=self.agent_name, content=tool_docs
+                )

         # If the long term memory is provided then set the long term memory prompt
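Net effect of the hunk above: the tool-use prompt and each tool's documentation now land in conversation history rather than being concatenated onto `self.sop`. A hedged sketch of the same seeding logic as a standalone helper, assuming `short_memory` exposes the `add(role=..., content=...)` API used in the diff:

```python
# Hedged sketch of the seeding step above, written as a free function.
# Assumes short_memory.add(role=..., content=...) as used in the diff and
# process_tool_docs from the new function_util module further down.
def seed_tool_memory(short_memory, agent_name, tools_prompt, tools):
    short_memory.add(role=agent_name, content=tools_prompt)
    for tool in tools:
        short_memory.add(role=agent_name, content=process_tool_docs(tool))
```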
@@ -607,6 +617,19 @@ class Agent:
                     role=self.agent_name, content=response
                 )

+                if self.tools:
+                    # Extract code from markdown
+                    response = extract_code_from_markdown(
+                        response
+                    )
+
+                    # Execute the tool by name
+                    execute_tool_by_name(
+                        response,
+                        self.tools,
+                        stop_token=self.stopping_token,
+                    )
+
                 if self.code_interpreter:
                     extracted_code = (
                         extract_code_from_markdown(response)
@@ -676,8 +699,7 @@ class Agent:

                 # Check stopping conditions
                 if (
-                    self.stopping_token
-                    and self.stopping_token in response
+                    self.stopping_token in response
                 ):
                     break
                 elif (
@@ -685,13 +707,13 @@ class Agent:
                     and self._check_stopping_condition(response)
                 ):
                     break
-                elif self.stopping_func and self.stopping_func(
+                elif self.stopping_func is not None and self.stopping_func(
                     response
                 ):
                     break

                 if self.interactive:
-                    user_input = input("You: ")
+                    user_input = colored(input("You: "), "red")

                     # User-defined exit command
                     if (
@@ -1,4 +1,5 @@
 import json
+import concurrent.futures
 import re
 from abc import abstractmethod
 from typing import Dict, List, NamedTuple
@@ -101,7 +102,46 @@ def execute_tool_by_name(
         if action.name in tools:
             tool = tools[action.name]
             try:
-                observation = tool.run(action.args)
+                # Check if multiple tools are used
+                tool_names = [name for name in tools if name in text]
+                if len(tool_names) > 1:
+                    # Execute tools concurrently
+                    with concurrent.futures.ThreadPoolExecutor() as executor:
+                        futures = []
+                        for tool_name in tool_names:
+                            futures.append(
+                                executor.submit(
+                                    tools[tool_name].run, action.args
+                                )
+                            )
+
+                        # Wait for all futures to complete
+                        concurrent.futures.wait(futures)
+
+                        # Get results from completed futures
+                        results = [
+                            future.result()
+                            for future in futures
+                            if future.done()
+                        ]
+
+                        # Process results
+                        for result in results:
+                            # Handle errors
+                            if isinstance(result, Exception):
+                                result = (
+                                    f"Error: {str(result)},"
+                                    f" {type(result).__name__}, args:"
+                                    f" {action.args}"
+                                )
+                            # Handle successful execution
+                            else:
+                                result = (
+                                    f"Command {tool.name} returned:"
+                                    f" {result}"
+                                )
+                else:
+                    observation = tool.run(action.args)
             except ValidationError as e:
                 observation = (
                     f"Validation Error in args: {str(e)}, args:"
@@ -121,5 +161,4 @@ def execute_tool_by_name(
                 "Please refer to the 'COMMANDS' list for available "
                 "commands and only respond in the specified JSON format."
             )

     return result
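The concurrency added above is standard `concurrent.futures` usage. A self-contained sketch of the same submit, wait, and collect flow, with stand-in tool functions invented for illustration:

```python
import concurrent.futures

# Stand-in "tools", invented for illustration.
def search(query: str) -> str:
    return f"search results for {query!r}"

def summarize(text: str) -> str:
    return f"summary of {text!r}"

tool_calls = [(search, "swarms"), (summarize, "the swarms README")]

# Submit every call, wait for all of them, then collect the results.
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(fn, arg) for fn, arg in tool_calls]
    concurrent.futures.wait(futures)
    results = [future.result() for future in futures if future.done()]

print(results)
```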

@@ -0,0 +1,25 @@
import inspect


def process_tool_docs(item):
    """
    Process the documentation for a given item.

    Args:
        item: The item to process the documentation for.

    Returns:
        metadata: The processed metadata containing the item's name, documentation, and source code.
    """
    # If item is an instance of a class (but not a class or function itself),
    # document its class instead. The isfunction check avoids collapsing plain
    # functions into the builtin `function` type, whose source is unavailable.
    if (
        not inspect.isclass(item)
        and not inspect.isfunction(item)
        and hasattr(item, "__class__")
    ):
        item = item.__class__

    doc = inspect.getdoc(item)
    source = inspect.getsource(item)
    is_class = inspect.isclass(item)
    item_type = "Class Name" if is_class else "Function Name"
    metadata = f"{item_type}: {item.__name__}\n\n"
    if doc:
        metadata += f"Documentation:\n{doc}\n\n"
    metadata += f"\n{source}"
    return metadata
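A quick usage sketch; the `add` function is a hypothetical example, and `inspect.getsource` needs the target to live in a real source file, so run this as a script rather than in a REPL:

```python
# Hypothetical example function to document.
def add(a: int, b: int) -> int:
    """Add two integers and return the sum."""
    return a + b

# Prints "Function Name: add", the docstring, and the source.
print(process_tool_docs(add))
```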