tools for flow and general cleanup

Former-commit-id: 0e335b144b
grit/923f7c6f-0958-480b-8748-ea6bbf1c2084
Kye 1 year ago
parent 4091388600
commit c83754beed

@@ -66,7 +66,7 @@ Let's explore how to use the `BioGPT` class with different scenarios and applications
 #### Example 1: Generating Biomedical Text
 ```python
-from biogpt import BioGPT
+from swarms.models import BioGPT

 # Initialize the BioGPT model
 biogpt = BioGPT()
@@ -81,7 +81,8 @@ print(generated_text)
 #### Example 2: Extracting Features
 ```python
-from biogpt import BioGPT
+from swarms.models import BioGPT

 # Initialize the BioGPT model
 biogpt = BioGPT()
@@ -96,7 +97,8 @@ print(features)
 #### Example 3: Using Beam Search Decoding
 ```python
-from biogpt import BioGPT
+from swarms.models import BioGPT

 # Initialize the BioGPT model
 biogpt = BioGPT()
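
These documentation hunks only swap the import path. For orientation, a minimal usage sketch against the new path might look like the block below; treating the model as callable on a prompt string is an assumption drawn from the surrounding examples (`print(generated_text)`, `print(features)`), not confirmed API.

```python
from swarms.models import BioGPT

# Initialize the BioGPT model
biogpt = BioGPT()

# Hypothetical call: generate biomedical text from a short prompt
generated_text = biogpt("COVID-19 is")
print(generated_text)
```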

@@ -1,49 +0,0 @@
from swarms import OpenAI, Flow
from swarms.swarms.groupchat import GroupChatManager, GroupChat

api_key = ""

llm = OpenAI(
    openai_api_key=api_key,
    temperature=0.5,
    max_tokens=3000,
)

# Initialize the flow
flow1 = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
    name="silly",
    dashboard=True,
)
flow2 = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU ARE VERY SMART AND ANSWER RIDDLES",
    name="detective",
    dashboard=True,
)
flow3 = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU MAKE RIDDLES",
    name="riddler",
    dashboard=True,
)
manager = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU ARE A GROUP CHAT MANAGER",
    name="manager",
    dashboard=True,
)

# Example usage:
agents = [flow1, flow2, flow3]

group_chat = GroupChat(agents=agents, messages=[], max_round=10)
chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
chat_history = chat_manager("Write me a riddle")

[12 binary image files removed, 3.0 MiB each; contents not shown.]

@@ -1,5 +0,0 @@
from swarms.models.openai_chat import ChatOpenAI

model = ChatOpenAI()

print(model("Hello, my name is", 5))

@@ -0,0 +1,7 @@
from swarms.models.openai_chat import OpenAIChat

model = OpenAIChat()

out = model("Hello, how are you?")
print(out)

@@ -0,0 +1,62 @@
from swarms.models import Anthropic
from swarms.structs import Flow
from swarms.tools.tool import tool
import asyncio

llm = Anthropic(
    anthropic_api_key="",
)


async def async_load_playwright(url: str) -> str:
    """Load the specified URLs using Playwright and parse using BeautifulSoup."""
    from bs4 import BeautifulSoup
    from playwright.async_api import async_playwright

    results = ""
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            await page.goto(url)

            page_source = await page.content()
            soup = BeautifulSoup(page_source, "html.parser")

            for script in soup(["script", "style"]):
                script.extract()

            text = soup.get_text()
            lines = (line.strip() for line in text.splitlines())
            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
            results = "\n".join(chunk for chunk in chunks if chunk)
        except Exception as e:
            results = f"Error: {e}"

        await browser.close()

    return results


def run_async(coro):
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(coro)


@tool
def browse_web_page(url: str) -> str:
    """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
    return run_async(async_load_playwright(url))


## Initialize the workflow
flow = Flow(
    llm=llm,
    max_loops=5,
    tools=[browse_web_page],
    dashboard=True,
)

out = flow.run(
    "Generate a 10,000 word blog on mental clarity and the benefits of meditation."
)
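
The file above wires a single Playwright-backed tool into a Flow. A minimal sketch of adding a second tool is below; it assumes the vendored `@tool` decorator behaves like LangChain's, deriving `tool.name` from the function name and `tool.description` from the docstring, which is what `Flow.get_tool_description()` reads when it builds the dynamic tool prompt.

```python
from swarms.tools.tool import tool


@tool
def word_count(text: str) -> str:
    """Count the number of words in a block of text."""
    return str(len(text.split()))


# Hypothetical wiring, reusing the names defined in the example above:
# flow = Flow(llm=llm, max_loops=5, tools=[browse_web_page, word_count])
```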

@@ -53,6 +53,7 @@ pydantic = "*"
 tenacity = "*"
 Pillow = "*"
 chromadb = "*"
+opencv-python-headless = "*"
 tabulate = "*"
 termcolor = "*"
 black = "*"

@@ -44,6 +44,7 @@ controlnet-aux
 diffusers
 einops
 imageio
+opencv-python-headless
 imageio-ffmpeg
 invisible-watermark
 kornia

@@ -213,7 +213,8 @@ class OpenAIChat(BaseChatModel):
     # When updating this to use a SecretStr
     # Check for classes that derive from this class (as some of them
     # may assume openai_api_key is a str)
-    openai_api_key: Optional[str] = Field(default=None, alias="api_key")
+    # openai_api_key: Optional[str] = Field(default=None, alias="api_key")
+    openai_api_key = "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"
     """Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
     openai_api_base: Optional[str] = Field(default=None, alias="base_url")
     """Base URL path for API requests, leave blank if not using a proxy or service

@@ -11,6 +11,7 @@ TODO:
 - Add batched inputs
 """
 import asyncio
+import re
 import json
 import logging
 import time
@@ -18,6 +19,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple
 from termcolor import colored
 import inspect
 import random
+from swarms.tools.tool import BaseTool

 # Prompts
 DYNAMIC_STOP_PROMPT = """
@@ -32,13 +34,25 @@ Your role is to engage in multi-step conversations with your self or the user,
 generate long-form content like blogs, screenplays, or SOPs,
 and accomplish tasks. You can have internal dialogues with yourself or can interact with the user
 to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand.

 {DYNAMIC_STOP_PROMPT}
 """

-# Utility functions
+# Make it able to handle multi input tools
+DYNAMICAL_TOOL_USAGE = """
+You have access to the following tools:
+Output a JSON object with the following structure to use the tools
+commands: {
+    "tools": {
+        tool1: "tool_name",
+        "params": {
+            "tool1": "inputs",
+            "tool1": "inputs"
+        }
+    }
+}
+
+{tools}
+"""

 # Custom stopping condition
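
The tool-usage prompt added above asks the model to reply with a JSON command block. A hedged illustration of a completion in the shape the parsing helpers added further down (`extract_tool_commands`, `parse_and_execute_tools`) actually look for, a fenced `json` block with `"tool"` and `"params"` keys; the tool name and URL are made up for the example:

```python
import json
import re

# Illustrative completion carrying one tool call in a fenced JSON block.
model_output = (
    "I will fetch the page first.\n"
    "```json\n"
    '{"tool": "browse_web_page", "params": {"url": "https://example.com"}}\n'
    "```"
)

# Same extraction idea as extract_tool_commands(): find fenced JSON and parse it.
matches = re.findall(r"```json(.+?)```", model_output, re.DOTALL)
commands = [json.loads(match) for match in matches]
print(commands[0]["tool"], commands[0]["params"])
```
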
@@ -137,7 +151,7 @@ class Flow:
         # The max_loops will be set dynamically if the dynamic_loop
         if self.dynamic_loops:
             self.max_loops = "auto"
-        # self.tools = tools
+        # self.tools = tools or []
         self.system_prompt = system_prompt
         self.agent_name = agent_name
         self.saved_state_path = saved_state_path
@@ -193,6 +207,73 @@ class Flow:
         return "\n".join(params_str_list)

+    def parse_tool_command(self, text: str):
+        # Parse the text for tool usage
+        pass
+
+    def get_tool_description(self):
+        """Get the tool description"""
+        tool_descriptions = []
+        for tool in self.tools:
+            description = f"{tool.name}: {tool.description}"
+            tool_descriptions.append(description)
+        return "\n".join(tool_descriptions)
+
+    def find_tool_by_name(self, name: str):
+        """Find a tool by name"""
+        for tool in self.tools:
+            if tool.name == name:
+                return tool
+        return None
+
+    def construct_dynamic_prompt(self):
+        """Construct the dynamic prompt"""
+        tools_description = self.get_tool_description()
+        return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
+    def extract_tool_commands(self, text: str):
+        """
+        Extract the tool commands from the text
+
+        Example:
+        ```json
+        {
+            "tool": "tool_name",
+            "params": {
+                "tool1": "inputs",
+                "param2": "value2"
+            }
+        }
+        ```
+        """
+        # Regex to find JSON like strings
+        pattern = r"```json(.+?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        json_commands = []
+        for match in matches:
+            try:
+                json_commands.append(json.loads(match))
+            except Exception as error:
+                print(f"Error parsing JSON command: {error}")
+        return json_commands
+    def parse_and_execute_tools(self, response):
+        """Parse and execute the tools"""
+        json_commands = self.extract_tool_commands(response)
+        for command in json_commands:
+            tool_name = command.get("tool")
+            params = command.get("params", {})
+            self.execute_tools(tool_name, params)
+
+    def execute_tools(self, tool_name, params):
+        """Execute the tool with the provided params"""
+        tool = self.find_tool_by_name(tool_name)
+        if tool:
+            # Execute the tool with the provided parameters
+            tool_result = tool.run(**params)
+            print(tool_result)
     def truncate_history(self):
         """
         Take the history and truncate it to fit into the model context length
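
Taken together, the helpers in this hunk extract fenced JSON from the model's reply, look the named tool up, and call it with the supplied parameters. A minimal, self-contained sketch of that dispatch step, with a plain function and a dict standing in for `BaseTool` objects and the Flow's tool list (the names and params are hypothetical):

```python
def browse_web_page(url: str) -> str:
    # Stand-in for the real Playwright-backed tool
    return f"<scraped contents of {url}>"


# Stand-in for find_tool_by_name(): map tool names to callables
registry = {"browse_web_page": browse_web_page}

commands = [{"tool": "browse_web_page", "params": {"url": "https://example.com"}}]
for command in commands:
    fn = registry.get(command["tool"])
    if fn is not None:
        print(fn(**command["params"]))
```
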
@@ -287,10 +368,13 @@ class Flow:
         5. Repeat until stopping condition is met or max_loops is reached
         """
+        dynamic_prompt = self.construct_dynamic_prompt()
+        combined_prompt = f"{dynamic_prompt}\n{task}"
+
         # Activate Autonomous agent message
         self.activate_autonomous_agent()

-        response = task
+        response = combined_prompt  # or task
         history = [f"{self.user_name}: {task}"]

         # If dashboard = True then print the dashboard
@@ -318,8 +402,13 @@ class Flow:
             while attempt < self.retry_attempts:
                 try:
                     response = self.llm(
-                        task**kwargs,
+                        task,
+                        **kwargs,
                     )
+                    # If there are any tools then parse and execute them
+                    # if self.tools:
+                    #     self.parse_and_execute_tools(response)
+
                     if self.interactive:
                         print(f"AI: {response}")
                         history.append(f"AI: {response}")

@@ -25,7 +25,7 @@ class AutoBlogGenSwarm:
     Topic Selection Agent:
     - Generate 10 topics on gaining mental clarity using Taosim and Christian meditation

     Draft Agent:
     - Write a 100% unique, creative and in human-like style article of a minimum of 5,000 words using headings and sub-headings.
@@ -42,8 +42,9 @@ class AutoBlogGenSwarm:
     swarm.run()
     ```
     """

     def __init__(
         self,
         llm,
@@ -62,7 +63,6 @@ class AutoBlogGenSwarm:
         self.max_retries = max_retries
         self.retry_attempts = retry_attempts

     def print_beautifully(self, subheader: str, text: str):
         """Prints the text beautifully"""
         print(
@@ -81,9 +81,9 @@ class AutoBlogGenSwarm:
     def social_media_prompt(self, article: str):
         """Gets the social media prompt"""
-        prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace("{{ARTICLE}}", article).replace(
-            "{{GOAL}}", self.objective
-        )
+        prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
+            "{{ARTICLE}}", article
+        ).replace("{{GOAL}}", self.objective)
         return prompt

     def get_review_prompt(self, article: str):
@@ -91,7 +91,6 @@ class AutoBlogGenSwarm:
         prompt = REVIEW_PROMPT.replace("{{ARTICLE}}", article)
         return prompt

     def step(self):
         """Steps through the task"""
         topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
@@ -107,16 +106,14 @@ class AutoBlogGenSwarm:
         review_agent = self.print_beautifully("Review Agent", review_agent)

         # Agent that publishes on social media
-        distribution_agent = self.llm(
-            self.social_media_prompt(article=review_agent)
-        )
+        distribution_agent = self.llm(self.social_media_prompt(article=review_agent))
         distribution_agent = self.print_beautifully(
             "Distribution Agent", distribution_agent
         )

     def run(self):
         """Runs the swarm"""
         for attempt in range(self.retry_attempts):
             try:
                 for i in range(self.iterations):
                     self.step()
@@ -124,13 +121,13 @@ class AutoBlogGenSwarm:
                 print(colored(f"Error while running AutoBlogGenSwarm {error}", "red"))
                 if attempt == self.retry_attempts - 1:
                     raise

     def update_task(self, new_task: str):
         """
         Updates the task of the swarm

         Args:
             new_task (str): New task to be performed by the swarm
         """
         self.topic_selection_agent = new_task

@@ -29,6 +29,7 @@ from pydantic import (
 )
 from langchain.schema.runnable import Runnable, RunnableConfig, RunnableSerializable

+
 class SchemaAnnotationError(TypeError):
     """Raised when 'args_schema' is missing or has an incorrect type annotation."""
