diff --git a/docs/swarms/models/biogpt.md b/docs/swarms/models/biogpt.md
index 291b917c..c43557b6 100644
--- a/docs/swarms/models/biogpt.md
+++ b/docs/swarms/models/biogpt.md
@@ -66,7 +66,7 @@ Let's explore how to use the `BioGPT` class with different scenarios and applica
 #### Example 1: Generating Biomedical Text
 
 ```python
-from biogpt import BioGPT
+from swarms.models import BioGPT
 
 # Initialize the BioGPT model
 biogpt = BioGPT()
@@ -81,7 +81,8 @@ print(generated_text)
 #### Example 2: Extracting Features
 
 ```python
-from biogpt import BioGPT
+from swarms.models import BioGPT
+
 
 # Initialize the BioGPT model
 biogpt = BioGPT()
@@ -96,7 +97,8 @@ print(features)
 #### Example 3: Using Beam Search Decoding
 
 ```python
-from biogpt import BioGPT
+from swarms.models import BioGPT
+
 
 # Initialize the BioGPT model
 biogpt = BioGPT()
diff --git a/groupchat.py b/groupchat.py
deleted file mode 100644
index 71d40a03..00000000
--- a/groupchat.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from swarms import OpenAI, Flow
-from swarms.swarms.groupchat import GroupChatManager, GroupChat
-
-
-api_key = ""
-
-llm = OpenAI(
-    openai_api_key=api_key,
-    temperature=0.5,
-    max_tokens=3000,
-)
-
-# Initialize the flow
-flow1 = Flow(
-    llm=llm,
-    max_loops=1,
-    system_prompt="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
-    name="silly",
-    dashboard=True,
-)
-flow2 = Flow(
-    llm=llm,
-    max_loops=1,
-    system_prompt="YOU ARE VERY SMART AND ANSWER RIDDLES",
-    name="detective",
-    dashboard=True,
-)
-flow3 = Flow(
-    llm=llm,
-    max_loops=1,
-    system_prompt="YOU MAKE RIDDLES",
-    name="riddler",
-    dashboard=True,
-)
-manager = Flow(
-    llm=llm,
-    max_loops=1,
-    system_prompt="YOU ARE A GROUP CHAT MANAGER",
-    name="manager",
-    dashboard=True,
-)
-
-
-# Example usage:
-agents = [flow1, flow2, flow3]
-
-group_chat = GroupChat(agents=agents, messages=[], max_round=10)
-chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
-chat_history = chat_manager("Write me a riddle")
diff --git a/images/10f498c2-e22a-4f7f-9e50-56bf1ef92629.png b/images/10f498c2-e22a-4f7f-9e50-56bf1ef92629.png
deleted file mode 100644
index 1dece111..00000000
Binary files a/images/10f498c2-e22a-4f7f-9e50-56bf1ef92629.png and /dev/null differ
diff --git a/images/1c990ee0-ed68-4375-9731-9c9c25a72fac.png b/images/1c990ee0-ed68-4375-9731-9c9c25a72fac.png
deleted file mode 100644
index c4b740c3..00000000
Binary files a/images/1c990ee0-ed68-4375-9731-9c9c25a72fac.png and /dev/null differ
diff --git a/images/2570cc4b-fafe-4f41-8193-ea9b563156e4.png b/images/2570cc4b-fafe-4f41-8193-ea9b563156e4.png
deleted file mode 100644
index dfb8834f..00000000
Binary files a/images/2570cc4b-fafe-4f41-8193-ea9b563156e4.png and /dev/null differ
diff --git a/images/35661b4a-f230-47a1-91bf-f876935151ed.png b/images/35661b4a-f230-47a1-91bf-f876935151ed.png
deleted file mode 100644
index 163d9a6c..00000000
Binary files a/images/35661b4a-f230-47a1-91bf-f876935151ed.png and /dev/null differ
diff --git a/images/4b2161eb-bc44-4ee9-b106-208408b81d42.png b/images/4b2161eb-bc44-4ee9-b106-208408b81d42.png
deleted file mode 100644
index 5f0af5c1..00000000
Binary files a/images/4b2161eb-bc44-4ee9-b106-208408b81d42.png and /dev/null differ
diff --git a/images/4e4ea9d1-e1e3-4609-a200-8d83b5912a44.png b/images/4e4ea9d1-e1e3-4609-a200-8d83b5912a44.png
deleted file mode 100644
index 1aa8fb4b..00000000
Binary files a/images/4e4ea9d1-e1e3-4609-a200-8d83b5912a44.png and /dev/null differ
diff --git a/images/5081867f-bb73-4ece-b746-df2247e55da5.png b/images/5081867f-bb73-4ece-b746-df2247e55da5.png
deleted file mode 100644
index 6967f2e2..00000000
Binary files a/images/5081867f-bb73-4ece-b746-df2247e55da5.png and /dev/null differ
diff --git a/images/a3fd26f3-0ee7-49b1-9e05-60b1dde1a1a8.png b/images/a3fd26f3-0ee7-49b1-9e05-60b1dde1a1a8.png
deleted file mode 100644
index 725208ef..00000000
Binary files a/images/a3fd26f3-0ee7-49b1-9e05-60b1dde1a1a8.png and /dev/null differ
diff --git a/images/af8e6856-9d24-46d5-81fc-c9b2010d5d77.png b/images/af8e6856-9d24-46d5-81fc-c9b2010d5d77.png
deleted file mode 100644
index 8d77ea96..00000000
Binary files a/images/af8e6856-9d24-46d5-81fc-c9b2010d5d77.png and /dev/null differ
diff --git a/images/f0f1d0e8-1672-4b9c-af1f-e6979f8a407c.png b/images/f0f1d0e8-1672-4b9c-af1f-e6979f8a407c.png
deleted file mode 100644
index 14827bf8..00000000
Binary files a/images/f0f1d0e8-1672-4b9c-af1f-e6979f8a407c.png and /dev/null differ
diff --git a/images/f4992864-b211-4510-9e4a-1148470dd5ec.png b/images/f4992864-b211-4510-9e4a-1148470dd5ec.png
deleted file mode 100644
index f28f6bc0..00000000
Binary files a/images/f4992864-b211-4510-9e4a-1148470dd5ec.png and /dev/null differ
diff --git a/images/ffd2e03f-4238-4b6d-b29e-a3b41624ceae.png b/images/ffd2e03f-4238-4b6d-b29e-a3b41624ceae.png
deleted file mode 100644
index 5b99e316..00000000
Binary files a/images/ffd2e03f-4238-4b6d-b29e-a3b41624ceae.png and /dev/null differ
diff --git a/openai_example.py b/openai_example.py
deleted file mode 100644
index 1020e8f8..00000000
--- a/openai_example.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from swarms.models.openai_chat import ChatOpenAI
-
-model = ChatOpenAI()
-
-print(model("Hello, my name is", 5))
diff --git a/playground/models/openai_example.py b/playground/models/openai_example.py
new file mode 100644
index 00000000..aacab66f
--- /dev/null
+++ b/playground/models/openai_example.py
@@ -0,0 +1,7 @@
+from swarms.models.openai_chat import OpenAIChat
+
+model = OpenAIChat()
+
+out = model("Hello, how are you?")
+
+print(out)
diff --git a/playground/structs/flow_tools.py b/playground/structs/flow_tools.py
new file mode 100644
index 00000000..647f6617
--- /dev/null
+++ b/playground/structs/flow_tools.py
@@ -0,0 +1,62 @@
+from swarms.models import Anthropic
+from swarms.structs import Flow
+from swarms.tools.tool import tool
+
+import asyncio
+
+
+llm = Anthropic(
+    anthropic_api_key="",
+)
+
+
+async def async_load_playwright(url: str) -> str:
+    """Load the specified URLs using Playwright and parse using BeautifulSoup."""
+    from bs4 import BeautifulSoup
+    from playwright.async_api import async_playwright
+
+    results = ""
+    async with async_playwright() as p:
+        browser = await p.chromium.launch(headless=True)
+        try:
+            page = await browser.new_page()
+            await page.goto(url)
+
+            page_source = await page.content()
+            soup = BeautifulSoup(page_source, "html.parser")
+
+            for script in soup(["script", "style"]):
+                script.extract()
+
+            text = soup.get_text()
+            lines = (line.strip() for line in text.splitlines())
+            chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
+            results = "\n".join(chunk for chunk in chunks if chunk)
+        except Exception as e:
+            results = f"Error: {e}"
+        await browser.close()
+    return results
+
+
+def run_async(coro):
+    event_loop = asyncio.get_event_loop()
+    return event_loop.run_until_complete(coro)
+
+
+@tool
+def browse_web_page(url: str) -> str:
+    """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
+    return run_async(async_load_playwright(url))
+
+
+## Initialize the workflow
+flow = Flow(
+    llm=llm,
+    max_loops=5,
+    tools=[browse_web_page],
+    dashboard=True,
+)
+
+out = flow.run(
+    "Generate a 10,000 word blog on mental clarity and the benefits of meditation."
+)
diff --git a/pyproject.toml b/pyproject.toml
index 8ff1df05..f76f7177 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,6 +53,7 @@ pydantic = "*"
 tenacity = "*"
 Pillow = "*"
 chromadb = "*"
+opencv-python-headless = "*"
 tabulate = "*"
 termcolor = "*"
 black = "*"
diff --git a/requirements.txt b/requirements.txt
index 1a74a36e..82e519af 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -44,6 +44,7 @@ controlnet-aux
 diffusers
 einops
 imageio
+opencv-python-headless
 imageio-ffmpeg
 invisible-watermark
 kornia
diff --git a/swarms/models/openai_chat.py b/swarms/models/openai_chat.py
index 8cb7e0c6..6ca964a2 100644
--- a/swarms/models/openai_chat.py
+++ b/swarms/models/openai_chat.py
@@ -213,7 +213,8 @@ class OpenAIChat(BaseChatModel):
     # When updating this to use a SecretStr
     # Check for classes that derive from this class (as some of them
     # may assume openai_api_key is a str)
-    openai_api_key: Optional[str] = Field(default=None, alias="api_key")
+    # Never hardcode a real API key here; it is read from the OPENAI_API_KEY env var.
+    openai_api_key: Optional[str] = Field(default=None, alias="api_key")
     """Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
     openai_api_base: Optional[str] = Field(default=None, alias="base_url")
     """Base URL path for API requests, leave blank if not using a proxy or service
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 8d89fd89..17a3fe2c 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -11,6 +11,7 @@ TODO:
 - Add batched inputs
 """
 import asyncio
+import re
 import json
 import logging
 import time
@@ -18,6 +19,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple
 from termcolor import colored
 import inspect
 import random
+from swarms.tools.tool import BaseTool
 
 # Prompts
 DYNAMIC_STOP_PROMPT = """
@@ -32,13 +34,25 @@ Your role is to engage in multi-step conversations with your self or the user,
 generate long-form content like blogs, screenplays, or SOPs,
 and accomplish tasks. You can have internal dialogues with yourself or can interact with the user
 to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand.
-
-{DYNAMIC_STOP_PROMPT}
-
 """
 
 
-# Utility functions
+# Make it able to handle multi input tools
+DYNAMICAL_TOOL_USAGE = """
+You have access to the following tools:
+Output a JSON object with the following structure to use the tools
+```json
+{{
+    "tool": "tool_name",
+    "params": {{
+        "param1": "value1",
+        "param2": "value2"
+    }}
+}}
+```
+
+{tools}
+"""
 
 
 # Custom stopping condition
@@ -137,7 +151,7 @@
         # The max_loops will be set dynamically if the dynamic_loop
         if self.dynamic_loops:
            self.max_loops = "auto"
-        # self.tools = tools
+        # self.tools = tools or []
         self.system_prompt = system_prompt
         self.agent_name = agent_name
         self.saved_state_path = saved_state_path
@@ -193,6 +207,73 @@
         return "\n".join(params_str_list)
 
+    def parse_tool_command(self, text: str):
+        # Parse the text for tool usage
+        pass
+
+    def get_tool_description(self):
+        """Get the tool description"""
+        tool_descriptions = []
+        for tool in self.tools:
+            description = f"{tool.name}: {tool.description}"
+            tool_descriptions.append(description)
+        return "\n".join(tool_descriptions)
+
+    def find_tool_by_name(self, name: str):
+        """Find a tool by name"""
+        for tool in self.tools:
+            if tool.name == name:
+                return tool
+        return None
+
+    def construct_dynamic_prompt(self):
+        """Construct the dynamic prompt"""
+        tools_description = self.get_tool_description()
+        return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
+
+    def extract_tool_commands(self, text: str):
+        """
+        Extract the tool commands from the text
+
+        Example:
+        ```json
+        {
+            "tool": "tool_name",
+            "params": {
+                "param1": "value1",
+                "param2": "value2"
+            }
+        }
+        ```
+
+        """
+        # Regex to find JSON like strings
+        pattern = r"```json(.+?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        json_commands = []
+        for match in matches:
+            try:
+                json_commands.append(json.loads(match))
+            except Exception as error:
+                print(f"Error parsing JSON command: {error}")
+        return json_commands
+
+    def parse_and_execute_tools(self, response):
+        """Parse and execute the tools"""
+        json_commands = self.extract_tool_commands(response)
+        for command in json_commands:
+            tool_name = command.get("tool")
+            params = command.get("params", {})
+            self.execute_tool(tool_name, params)
+
+    def execute_tool(self, tool_name, params):
+        """Execute the tool with the provided params"""
+        tool = self.find_tool_by_name(tool_name)
+        if tool:
+            # Execute the tool with the provided parameters
+            tool_result = tool.run(**params)
+            print(tool_result)
+
     def truncate_history(self):
         """
         Take the history and truncate it to fit into the model context length
 
@@ -287,10 +368,13 @@
         5. Repeat until stopping condition is met or max_loops is reached
 
         """
+        dynamic_prompt = self.construct_dynamic_prompt()
+        combined_prompt = f"{dynamic_prompt}\n{task}"
+
         # Activate Autonomous agent message
         self.activate_autonomous_agent()
 
-        response = task
+        response = combined_prompt  # or task
         history = [f"{self.user_name}: {task}"]
 
         # If dashboard = True then print the dashboard
@@ -318,8 +402,13 @@
             while attempt < self.retry_attempts:
                 try:
                     response = self.llm(
-                        task**kwargs,
+                        task,
+                        **kwargs,
                     )
+                    # If there are any tools then parse and execute them
+                    # if self.tools:
+                    #     self.parse_and_execute_tools(response)
+
                     if self.interactive:
                         print(f"AI: {response}")
                         history.append(f"AI: {response}")
diff --git a/swarms/swarms/autobloggen.py b/swarms/swarms/autobloggen.py
index 12b0d3b9..6c3c6bf1 100644
--- a/swarms/swarms/autobloggen.py
+++ b/swarms/swarms/autobloggen.py
@@ -25,7 +25,7 @@ class AutoBlogGenSwarm:
 
     Topic Selection Agent:
     - Generate 10 topics on gaining mental clarity using Taosim and Christian meditation
-    
+
     Draft Agent:
     - Write a 100% unique, creative and in human-like style article of a minimum of 5,000 words using headings and sub-headings.
 
@@ -42,8 +42,9 @@ class AutoBlogGenSwarm:
 
     swarm.run()
     ```
-    
+
     """
+
     def __init__(
         self,
         llm,
@@ -62,7 +63,6 @@ class AutoBlogGenSwarm:
         self.max_retries = max_retries
         self.retry_attempts = retry_attempts
 
-
     def print_beautifully(self, subheader: str, text: str):
         """Prints the text beautifully"""
         print(
@@ -81,9 +81,9 @@ class AutoBlogGenSwarm:
 
     def social_media_prompt(self, article: str):
         """Gets the social media prompt"""
-        prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace("{{ARTICLE}}", article).replace(
-            "{{GOAL}}", self.objective
-        )
+        prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
+            "{{ARTICLE}}", article
+        ).replace("{{GOAL}}", self.objective)
         return prompt
 
     def get_review_prompt(self, article: str):
@@ -91,7 +91,6 @@ class AutoBlogGenSwarm:
         prompt = REVIEW_PROMPT.replace("{{ARTICLE}}", article)
         return prompt
 
-
     def step(self):
         """Steps through the task"""
         topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
@@ -107,16 +106,14 @@ class AutoBlogGenSwarm:
         review_agent = self.print_beautifully("Review Agent", review_agent)
 
         # Agent that publishes on social media
-        distribution_agent = self.llm(
-            self.social_media_prompt(article=review_agent)
-        )
+        distribution_agent = self.llm(self.social_media_prompt(article=review_agent))
         distribution_agent = self.print_beautifully(
             "Distribution Agent", distribution_agent
         )
 
     def run(self):
         """Runs the swarm"""
-        for attempt in range(self.retry_attempts): 
+        for attempt in range(self.retry_attempts):
             try:
                 for i in range(self.iterations):
                     self.step()
@@ -124,13 +121,13 @@ class AutoBlogGenSwarm:
                 print(colored(f"Error while running AutoBlogGenSwarm {error}", "red"))
                 if attempt == self.retry_attempts - 1:
                     raise
-        
+
     def update_task(self, new_task: str):
         """
         Updates the task of the swarm
 
         Args:
            new_task (str): New task to be performed by the swarm
-        
+
         """
         self.topic_selection_agent = new_task
diff --git a/swarms/tools/tool.py b/swarms/tools/tool.py
index 9c74a63a..a5ad3f75 100644
--- a/swarms/tools/tool.py
+++ b/swarms/tools/tool.py
@@ -29,6 +29,7 @@ from pydantic import (
 )
 from langchain.schema.runnable import Runnable, RunnableConfig, RunnableSerializable
+
 
 class SchemaAnnotationError(TypeError):
     """Raised when 'args_schema' is missing or has an incorrect type annotation."""
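
Note: the tool-command protocol added to flow.py above (the DYNAMICAL_TOOL_USAGE prompt plus extract_tool_commands / parse_and_execute_tools) can be exercised on its own. The sketch below is only an illustration of that parse-and-dispatch flow under the fenced-JSON format shown in the diff; demo_search, the TOOLS dict, and the sample model reply are hypothetical stand-ins, not part of the swarms API.

import json
import re


def demo_search(query: str) -> str:
    # Hypothetical stand-in for a real swarms tool's .run() method
    return f"results for: {query}"


TOOLS = {"demo_search": demo_search}

# A hypothetical model reply that follows the DYNAMICAL_TOOL_USAGE format:
# the command is wrapped in a fenced ```json block inside free-form text.
response = """
Let me look that up.
```json
{"tool": "demo_search", "params": {"query": "benefits of meditation"}}
```
"""

# Same regex Flow.extract_tool_commands uses to pull fenced JSON commands out of text
for match in re.findall(r"```json(.+?)```", response, re.DOTALL):
    command = json.loads(match)
    tool = TOOLS.get(command.get("tool"))
    if tool:
        # Dispatch by name and pass the params through, as Flow.execute_tool does
        print(tool(**command.get("params", {})))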