Tools for flow and general cleanup

Former-commit-id: 0e335b144b
grit/923f7c6f-0958-480b-8748-ea6bbf1c2084
Kye 1 year ago
parent 4091388600
commit c83754beed

@ -66,7 +66,7 @@ Let's explore how to use the `BioGPT` class with different scenarios and applica
#### Example 1: Generating Biomedical Text
```python
from biogpt import BioGPT
from swarms.models import BioGPT
# Initialize the BioGPT model
biogpt = BioGPT()
@ -81,7 +81,8 @@ print(generated_text)
#### Example 2: Extracting Features
```python
from biogpt import BioGPT
from swarms.models import BioGPT
# Initialize the BioGPT model
biogpt = BioGPT()
@ -96,7 +97,8 @@ print(features)
#### Example 3: Using Beam Search Decoding
```python
from biogpt import BioGPT
from swarms.models import BioGPT
# Initialize the BioGPT model
biogpt = BioGPT()

@ -1,49 +0,0 @@
import os

from swarms import OpenAI, Flow
from swarms.swarms.groupchat import GroupChatManager, GroupChat

# Read the key from the environment instead of hard-coding a credential in
# source; defaults to "" to preserve the original placeholder behavior.
api_key = os.getenv("OPENAI_API_KEY", "")

llm = OpenAI(
    openai_api_key=api_key,
    temperature=0.5,
    max_tokens=3000,
)

# Three chat agents plus a manager, each running a single loop with the
# dashboard enabled.
flow1 = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
    name="silly",
    dashboard=True,
)
flow2 = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU ARE VERY SMART AND ANSWER RIDDLES",
    name="detective",
    dashboard=True,
)
flow3 = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU MAKE RIDDLES",
    name="riddler",
    dashboard=True,
)
manager = Flow(
    llm=llm,
    max_loops=1,
    system_prompt="YOU ARE A GROUP CHAT MANAGER",
    name="manager",
    dashboard=True,
)

# Example usage: a 10-round group chat moderated by the manager agent.
agents = [flow1, flow2, flow3]
group_chat = GroupChat(agents=agents, messages=[], max_round=10)
chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
chat_history = chat_manager("Write me a riddle")

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 MiB

@ -1,5 +0,0 @@
from swarms.models.openai_chat import ChatOpenAI

# Instantiate the chat model and print a 5-token completion for the prompt.
chat_model = ChatOpenAI()
response = chat_model("Hello, my name is", 5)
print(response)

@ -0,0 +1,7 @@
from swarms.models.openai_chat import OpenAIChat

# Instantiate the chat model, send one prompt, and echo the reply.
chat = OpenAIChat()
reply = chat("Hello, how are you?")
print(reply)

@ -0,0 +1,62 @@
from swarms.models import Anthropic
from swarms.structs import Flow
from swarms.tools.tool import tool
import asyncio
# Anthropic LLM client for the workflow below.
# NOTE(review): the API key is left blank here — supply a real key
# (e.g. from an environment variable) before running this example.
llm = Anthropic(
    anthropic_api_key="",
)
async def async_load_playwright(url: str) -> str:
    """Load the specified URLs using Playwright and parse using BeautifulSoup."""
    # Imported lazily so the module loads even when these optional
    # dependencies are absent.
    from bs4 import BeautifulSoup
    from playwright.async_api import async_playwright

    results = ""
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            await page.goto(url)
            page_source = await page.content()
            soup = BeautifulSoup(page_source, "html.parser")
            # Drop <script>/<style> nodes so only visible text remains.
            for script in soup(["script", "style"]):
                script.extract()
            text = soup.get_text()
            # Normalize whitespace: strip each line, split into phrases,
            # and keep only non-empty chunks.
            lines = (line.strip() for line in text.splitlines())
            chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
            results = "\n".join(chunk for chunk in chunks if chunk)
        except Exception as e:
            # Best-effort scrape: report the failure as the result text.
            results = f"Error: {e}"
        await browser.close()
    return results
def run_async(coro):
    """Run *coro* to completion and return its result.

    Uses ``asyncio.run``, which creates and tears down a fresh event loop
    per call, instead of the deprecated ``asyncio.get_event_loop()`` +
    ``run_until_complete`` pattern (which emits a DeprecationWarning on
    Python 3.10+ and fails once the implicit loop has been closed).
    """
    return asyncio.run(coro)
@tool
def browse_web_page(url: str) -> str:
    """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
    # Bridges the async Playwright scraper into this synchronous tool API.
    return run_async(async_load_playwright(url))
## Initialize the workflow
# Flow wired with the web-scraping tool; dashboard=True prints run status,
# and up to 5 loops are allowed for the agent to complete the task.
flow = Flow(
    llm=llm,
    max_loops=5,
    tools=[browse_web_page],
    dashboard=True,
)

# Kick off the task; `out` holds the agent's final response.
out = flow.run(
    "Generate a 10,000 word blog on mental clarity and the benefits of meditation."
)

@ -53,6 +53,7 @@ pydantic = "*"
tenacity = "*"
Pillow = "*"
chromadb = "*"
opencv-python-headless = "*"
tabulate = "*"
termcolor = "*"
black = "*"

@ -44,6 +44,7 @@ controlnet-aux
diffusers
einops
imageio
opencv-python-headless
imageio-ffmpeg
invisible-watermark
kornia

@ -213,7 +213,8 @@ class OpenAIChat(BaseChatModel):
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
# SECURITY: never hard-code API keys in source — a live key was committed
# here and must be considered compromised (revoke it). The key is read from
# the OPENAI_API_KEY environment variable or passed via the `api_key` alias.
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service

@ -11,6 +11,7 @@ TODO:
- Add batched inputs
"""
import asyncio
import re
import json
import logging
import time
@ -18,6 +19,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple
from termcolor import colored
import inspect
import random
from swarms.tools.tool import BaseTool
# Prompts
DYNAMIC_STOP_PROMPT = """
@ -32,13 +34,25 @@ Your role is to engage in multi-step conversations with your self or the user,
generate long-form content like blogs, screenplays, or SOPs,
and accomplish tasks. You can have internal dialogues with yourself or can interact with the user
to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand.
{DYNAMIC_STOP_PROMPT}
"""
# Utility functions
# Make it able to handle multi input tools
# Tool-usage prompt template. Literal braces are doubled so that
# construct_dynamic_prompt's `.format(tools=...)` call works — the previous
# unescaped `{`/`}` made `.format` raise. The example is valid JSON using the
# "tool"/"params" keys that extract_tool_commands expects, wrapped in the
# ```json fence its regex looks for.
DYNAMICAL_TOOL_USAGE = """
You have access to the following tools:
Output a JSON object with the following structure to use the tools

```json
{{
    "tool": "tool_name",
    "params": {{
        "param1": "value1",
        "param2": "value2"
    }}
}}
```

{tools}
"""
# Custom stopping condition
@ -137,7 +151,7 @@ class Flow:
# The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops:
self.max_loops = "auto"
# self.tools = tools
# self.tools = tools or []
self.system_prompt = system_prompt
self.agent_name = agent_name
self.saved_state_path = saved_state_path
@ -193,6 +207,73 @@ class Flow:
return "\n".join(params_str_list)
def parse_tool_command(self, text: str):
    # Parse the text for tool usage
    # TODO: not implemented yet — extract_tool_commands currently handles
    # pulling JSON tool invocations out of model responses.
    pass
def get_tool_description(self):
    """Return a newline-separated "name: description" line per registered tool.

    Tolerates the ``tools`` attribute being absent or None (tool wiring is
    optional on this class — the assignment in __init__ is commented out)
    by returning an empty string instead of raising AttributeError.
    """
    tools = getattr(self, "tools", None) or []
    descriptions = [f"{tool.name}: {tool.description}" for tool in tools]
    return "\n".join(descriptions)
def find_tool_by_name(self, name: str):
    """Return the registered tool whose name equals *name*, or None."""
    return next(
        (candidate for candidate in self.tools if candidate.name == name),
        None,
    )
def construct_dynamic_prompt(self):
    """Render the tool-usage prompt with the current tool descriptions."""
    return DYNAMICAL_TOOL_USAGE.format(tools=self.get_tool_description())
def extract_tool_commands(self, text: str):
    """
    Extract tool commands from ```json fenced blocks in *text*.

    Returns a list of the successfully parsed JSON objects; blocks that
    fail to parse are reported and skipped.

    Example:
    ```json
    {
        "tool": "tool_name",
        "params": {
            "param1": "value1",
            "param2": "value2"
        }
    }
    ```
    """
    # Regex to find JSON-like fenced strings
    pattern = r"```json(.+?)```"
    matches = re.findall(pattern, text, re.DOTALL)
    json_commands = []
    for match in matches:
        try:
            # Bug fix: the original rebound `json_commands` to the parsed
            # object and appended it to itself, and the function never
            # returned anything. Parse into the accumulator and return it.
            json_commands.append(json.loads(match))
        except Exception as error:
            print(f"Error parsing JSON command: {error}")
    return json_commands
def parse_and_execute_tools(self, response):
    """Parse tool commands out of *response* and execute each one."""
    # `or []` guards against a helper that returns None instead of a list.
    json_commands = self.extract_tool_commands(response) or []
    for command in json_commands:
        tool_name = command.get("tool")
        # Bug fix: the key was misspelled "parmas" (params were always {}),
        # and the call targeted nonexistent self.execute_tool — the defined
        # method is execute_tools.
        params = command.get("params", {})
        self.execute_tools(tool_name, params)
def execute_tools(self, tool_name, params):
    """Execute the named tool with *params*.

    Returns the tool's result, or None when no tool with that name is
    registered.
    """
    # Bug fix: the original called self.tool_find_by_name, which does not
    # exist — the lookup helper defined on this class is find_tool_by_name.
    tool = self.find_tool_by_name(tool_name)
    if tool:
        # Execute the tool with the provided parameters
        tool_result = tool.run(**params)
        print(tool_result)
        return tool_result
def truncate_history(self):
"""
Take the history and truncate it to fit into the model context length
@ -287,10 +368,13 @@ class Flow:
5. Repeat until stopping condition is met or max_loops is reached
"""
dynamic_prompt = self.construct_dynamic_prompt()
combined_prompt = f"{dynamic_prompt}\n{task}"
# Activate Autonomous agent message
self.activate_autonomous_agent()
response = task
response = combined_prompt # or task
history = [f"{self.user_name}: {task}"]
# If dashboard = True then print the dashboard
@ -318,8 +402,13 @@ class Flow:
while attempt < self.retry_attempts:
try:
response = self.llm(
task**kwargs,
task,
**kwargs,
)
# If there are any tools then parse and execute them
# if self.tools:
# self.parse_and_execute_tools(response)
if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")

@ -44,6 +44,7 @@ class AutoBlogGenSwarm:
"""
def __init__(
self,
llm,
@ -62,7 +63,6 @@ class AutoBlogGenSwarm:
self.max_retries = max_retries
self.retry_attempts = retry_attempts
def print_beautifully(self, subheader: str, text: str):
"""Prints the text beautifully"""
print(
@ -81,9 +81,9 @@ class AutoBlogGenSwarm:
def social_media_prompt(self, article: str):
"""Gets the social media prompt"""
prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace("{{ARTICLE}}", article).replace(
"{{GOAL}}", self.objective
)
prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
"{{ARTICLE}}", article
).replace("{{GOAL}}", self.objective)
return prompt
def get_review_prompt(self, article: str):
@ -91,7 +91,6 @@ class AutoBlogGenSwarm:
prompt = REVIEW_PROMPT.replace("{{ARTICLE}}", article)
return prompt
def step(self):
"""Steps through the task"""
topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
@ -107,9 +106,7 @@ class AutoBlogGenSwarm:
review_agent = self.print_beautifully("Review Agent", review_agent)
# Agent that publishes on social media
distribution_agent = self.llm(
self.social_media_prompt(article=review_agent)
)
distribution_agent = self.llm(self.social_media_prompt(article=review_agent))
distribution_agent = self.print_beautifully(
"Distribution Agent", distribution_agent
)

@ -29,6 +29,7 @@ from pydantic import (
)
from langchain.schema.runnable import Runnable, RunnableConfig, RunnableSerializable
class SchemaAnnotationError(TypeError):
"""Raised when 'args_schema' is missing or has an incorrect type annotation."""

Loading…
Cancel
Save