From 1df42a399124827e756e81a9c82e59cdc3f8a5ff Mon Sep 17 00:00:00 2001
From: Kye
Date: Mon, 1 Jan 2024 15:38:52 -0500
Subject: [PATCH] [BUGFIX][Conversation][swarms.tools]

---
 scripts/auto_tests_docs/auto_docs.py          | 133 +++++++++++-------
 scripts/auto_tests_docs/auto_docs_omni.py     | 133 ++++++++++++++++++
 .../agents/simple_agent.py => simple_agent.py |  24 +++-
 swarms/models/openai_models.py                |   4 +-
 swarms/structs/__init__.py                    |   4 +-
 swarms/structs/conversation.py                |   2 +-
 swarms/structs/utils.py                       |  19 +++
 swarms/telemetry/__init__.py                  |  26 +++-
 swarms/tools/__init__.py                      |  22 ++-
 swarms/tools/code_executor.py                 | 111 +++++++++++++++
 swarms/tools/tool_utils.py                    |  15 +-
 11 files changed, 427 insertions(+), 66 deletions(-)
 create mode 100644 scripts/auto_tests_docs/auto_docs_omni.py
 rename playground/agents/simple_agent.py => simple_agent.py (60%)
 create mode 100644 swarms/tools/code_executor.py

diff --git a/scripts/auto_tests_docs/auto_docs.py b/scripts/auto_tests_docs/auto_docs.py
index 5df0f63d..627f77ef 100644
--- a/scripts/auto_tests_docs/auto_docs.py
+++ b/scripts/auto_tests_docs/auto_docs.py
@@ -2,27 +2,39 @@
 import inspect
 import os
 import threading
-from zeta import OpenAIChat
+from swarms import OpenAIChat
 from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
-from zeta.nn.modules._activations import (
-    AccurateGELUActivation,
-    ClippedGELUActivation,
-    FastGELUActivation,
-    GELUActivation,
-    LaplaceActivation,
-    LinearActivation,
-    MishActivation,
-    NewGELUActivation,
-    PytorchGELUTanh,
-    QuickGELUActivation,
-    ReLUSquaredActivation,
+
+from swarms.structs.agent import Agent
+from swarms.structs.autoscaler import AutoScaler
+from swarms.structs.base import BaseStructure
+from swarms.structs.base_swarm import AbstractSwarm
+from swarms.structs.base_workflow import BaseWorkflow
+from swarms.structs.concurrent_workflow import ConcurrentWorkflow
+from swarms.structs.conversation import Conversation
+from swarms.structs.groupchat import GroupChat, GroupChatManager
+from swarms.structs.model_parallizer import ModelParallelizer
+from swarms.structs.multi_agent_collab import MultiAgentCollaboration
+from swarms.structs.nonlinear_workflow import NonlinearWorkflow
+from swarms.structs.recursive_workflow import RecursiveWorkflow
+from swarms.structs.schemas import (
+    Artifact,
+    ArtifactUpload,
+    StepInput,
+    TaskInput,
+)
+from swarms.structs.sequential_workflow import SequentialWorkflow
+from swarms.structs.swarm_net import SwarmNetwork
+from swarms.structs.utils import (
+    distribute_tasks,
+    extract_key_from_json,
+    extract_tokens_from_text,
+    find_agent_by_id,
+    find_token_in_text,
+    parse_tasks,
+
 )
-from zeta.nn.modules.dense_connect import DenseBlock
-from zeta.nn.modules.dual_path_block import DualPathBlock
-from zeta.nn.modules.feedback_block import FeedbackBlock
-from zeta.nn.modules.highway_layer import HighwayLayer
-from zeta.nn.modules.multi_scale_block import MultiScaleBlock
-from zeta.nn.modules.recursive_block import RecursiveBlock
+
+
 from dotenv import load_dotenv

 load_dotenv()
@@ -36,59 +48,76 @@ model = OpenAIChat(
 )


-def process_documentation(cls):
+def process_documentation(
+    item,
+    module: str = "swarms.structs",
+    docs_folder_path: str = "docs/swarms/structs",
+):
     """
-    Process the documentation for a given class using OpenAI model and save it in a Markdown file.
+    Process the documentation for a given class or function using the OpenAI model and save it as a Markdown file.
""" - doc = inspect.getdoc(cls) - source = inspect.getsource(cls) + doc = inspect.getdoc(item) + source = inspect.getsource(item) + is_class = inspect.isclass(item) + item_type = "Class Name" if is_class else "Name" input_content = ( - "Class Name:" - f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource" + f"{item_type}:" + f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource" f" Code:\n{source}" ) - print(input_content) - # Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content) + # Process with OpenAI model processed_content = model( - DOCUMENTATION_WRITER_SOP(input_content, "zeta") + DOCUMENTATION_WRITER_SOP(input_content, module) ) - doc_content = f"# {cls.__name__}\n\n{processed_content}\n" + doc_content = f"# {item.__name__}\n\n{processed_content}\n" # Create the directory if it doesn't exist - dir_path = "docs/zeta/nn/modules" + dir_path = docs_folder_path os.makedirs(dir_path, exist_ok=True) - # Write the processed documentation to a Markdown file - file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md") + # Write the processed documentation to a Python file + file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md") with open(file_path, "w") as file: file.write(doc_content) + print( + f"Processed documentation for {item.__name__}. at {file_path}" + ) + def main(): - classes = [ - DenseBlock, - HighwayLayer, - MultiScaleBlock, - FeedbackBlock, - DualPathBlock, - RecursiveBlock, - PytorchGELUTanh, - NewGELUActivation, - GELUActivation, - FastGELUActivation, - QuickGELUActivation, - ClippedGELUActivation, - AccurateGELUActivation, - MishActivation, - LinearActivation, - LaplaceActivation, - ReLUSquaredActivation, + items = [ + Agent, + SequentialWorkflow, + AutoScaler, + Conversation, + TaskInput, + Artifact, + ArtifactUpload, + StepInput, + SwarmNetwork, + ModelParallelizer, + MultiAgentCollaboration, + AbstractSwarm, + GroupChat, + GroupChatManager, + parse_tasks, + find_agent_by_id, + distribute_tasks, + find_token_in_text, + extract_key_from_json, + extract_tokens_from_text, + ConcurrentWorkflow, + RecursiveWorkflow, + NonlinearWorkflow, + BaseWorkflow, + BaseStructure, ] threads = [] - for cls in classes: + for cls in items: thread = threading.Thread( target=process_documentation, args=(cls,) ) @@ -100,7 +129,7 @@ def main(): thread.join() print( - "Documentation generated in 'docs/zeta/nn/modules' directory." + "Documentation generated in 'docs/swarms/structs' directory." 
     )

diff --git a/scripts/auto_tests_docs/auto_docs_omni.py b/scripts/auto_tests_docs/auto_docs_omni.py
new file mode 100644
index 00000000..fbd80b6a
--- /dev/null
+++ b/scripts/auto_tests_docs/auto_docs_omni.py
@@ -0,0 +1,133 @@
+import inspect
+import os
+import threading
+
+from dotenv import load_dotenv
+from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
+from swarms import OpenAIChat
+from swarms.structs.agent import Agent
+from swarms.structs.autoscaler import AutoScaler
+from swarms.structs.base import BaseStructure
+from swarms.structs.base_swarm import AbstractSwarm
+from swarms.structs.base_workflow import BaseWorkflow
+from swarms.structs.concurrent_workflow import ConcurrentWorkflow
+from swarms.structs.conversation import Conversation
+from swarms.structs.groupchat import GroupChat, GroupChatManager
+from swarms.structs.model_parallizer import ModelParallelizer
+from swarms.structs.multi_agent_collab import MultiAgentCollaboration
+from swarms.structs.nonlinear_workflow import NonlinearWorkflow
+from swarms.structs.recursive_workflow import RecursiveWorkflow
+from swarms.structs.schemas import (
+    Artifact,
+    ArtifactUpload,
+    StepInput,
+    TaskInput,
+)
+from swarms.structs.sequential_workflow import SequentialWorkflow
+from swarms.structs.swarm_net import SwarmNetwork
+from swarms.structs.utils import (
+    distribute_tasks,
+    extract_key_from_json,
+    extract_tokens_from_text,
+    find_agent_by_id,
+    find_token_in_text,
+    parse_tasks,
+)
+
+
+load_dotenv()
+
+api_key = os.getenv("OPENAI_API_KEY")
+
+model = OpenAIChat(
+    model_name="gpt-4-1106-preview",
+    openai_api_key=api_key,
+    max_tokens=4000,
+)
+
+
+def process_documentation(
+    item,
+    module: str = "swarms.structs",
+    docs_folder_path: str = "docs/swarms/structs",
+):
+    """
+    Process the documentation for a given class or function using the OpenAI model and save it as a Markdown file.
+    """
+    doc = inspect.getdoc(item)
+    source = inspect.getsource(item)
+    is_class = inspect.isclass(item)
+    item_type = "Class Name" if is_class else "Name"
+    input_content = (
+        f"{item_type}:"
+        f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
+        f" Code:\n{source}"
+    )
+
+    # Process with OpenAI model
+    processed_content = model(
+        DOCUMENTATION_WRITER_SOP(input_content, module)
+    )
+
+    doc_content = f"# {item.__name__}\n\n{processed_content}\n"
+
+    # Create the directory if it doesn't exist
+    dir_path = docs_folder_path
+    os.makedirs(dir_path, exist_ok=True)
+
+    # Write the processed documentation to a Markdown file
+    file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
+    with open(file_path, "w") as file:
+        file.write(doc_content)
+
+    print(
+        f"Processed documentation for {item.__name__} at {file_path}"
+    )
+
+
+def main(module: str = "docs/swarms/structs"):
+    items = [
+        Agent,
+        SequentialWorkflow,
+        AutoScaler,
+        Conversation,
+        TaskInput,
+        Artifact,
+        ArtifactUpload,
+        StepInput,
+        SwarmNetwork,
+        ModelParallelizer,
+        MultiAgentCollaboration,
+        AbstractSwarm,
+        GroupChat,
+        GroupChatManager,
+        parse_tasks,
+        find_agent_by_id,
+        distribute_tasks,
+        find_token_in_text,
+        extract_key_from_json,
+        extract_tokens_from_text,
+        ConcurrentWorkflow,
+        RecursiveWorkflow,
+        NonlinearWorkflow,
+        BaseWorkflow,
+        BaseStructure,
+    ]
+
+    threads = []
+    for item in items:
+        thread = threading.Thread(
+            target=process_documentation, args=(item,)
+        )
+        threads.append(thread)
+        thread.start()
+
+    # Wait for all threads to complete
+    for thread in threads:
+        thread.join()
+
+    print(f"Documentation generated in {module} directory.")
+
+
+if __name__ == "__main__":
+    main()
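For reference, a minimal sketch of driving the generator above for a single item, serially rather than threaded. This assumes the swarms package is installed and OPENAI_API_KEY is set, since the module builds an OpenAIChat client at import time:

```python
# Hypothetical single-item run of auto_docs_omni's process_documentation;
# assumes swarms is installed and OPENAI_API_KEY is set in the environment.
from swarms.structs.conversation import Conversation
from scripts.auto_tests_docs.auto_docs_omni import process_documentation

process_documentation(
    Conversation,
    module="swarms.structs",
    docs_folder_path="docs/swarms/structs",
)
# -> writes docs/swarms/structs/conversation.md
```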
diff --git a/playground/agents/simple_agent.py b/simple_agent.py
similarity index 60%
rename from playground/agents/simple_agent.py
rename to simple_agent.py
index dd46083b..5d9d57ed 100644
--- a/playground/agents/simple_agent.py
+++ b/simple_agent.py
@@ -5,9 +5,14 @@ from dotenv import load_dotenv
 from swarms import (
     OpenAIChat,
     Conversation,
+    detect_markdown,
+    extract_code_from_markdown,
 )

+from swarms.tools.code_executor import CodeExecutor
+
 conv = Conversation(
+    autosave=False,
     time_enabled=True,
 )
@@ -18,7 +23,7 @@ load_dotenv()
 api_key = os.environ.get("OPENAI_API_KEY")

 # Initialize the language model
-llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
+llm = OpenAIChat(openai_api_key=api_key)


 # Run the language model in a loop
@@ -27,18 +32,33 @@ def interactive_conversation(llm, iters: int = 10):
     for i in range(iters):
         user_input = input("User: ")
         conv.add("user", user_input)
+
         if user_input.lower() == "quit":
             break
+
         task = (
             conv.return_history_as_string()
         )  # Get the conversation history
+
+        # Run the language model
         out = llm(task)
         conv.add("assistant", out)
         print(
             f"Assistant: {out}",
         )
+
+        # Code Interpreter
+        if detect_markdown(out):
+            code = extract_code_from_markdown(out)
+            if code:
+                print(f"Code: {code}")
+                executor = CodeExecutor()
+                out = executor.run(code)
+                conv.add("assistant", out)
+                # print(f"Assistant: {out}")
+
     conv.display_conversation()
-    conv.export_conversation("conversation.txt")
+    # conv.export_conversation("conversation.txt")


 # Replace with your LLM instance
diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py
index 14332ff2..f13657dc 100644
--- a/swarms/models/openai_models.py
+++ b/swarms/models/openai_models.py
@@ -189,7 +189,9 @@ class BaseOpenAI(BaseLLM):
             return True

     client: Any = None  #: :meta private:
-    model_name: str = Field(default="text-davinci-003", alias="model")
+    model_name: str = Field(
+        default="gpt-4-1106-preview", alias="model"
+    )
     """Model name to use."""
     temperature: float = 0.7
     """What sampling temperature to use."""
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index e589f1a8..80d23d7c 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -25,6 +25,7 @@ from swarms.structs.utils import (
     find_agent_by_id,
     find_token_in_text,
     parse_tasks,
+    detect_markdown,
 )

 __all__ = [
@@ -52,5 +53,6 @@ __all__ = [
     "RecursiveWorkflow",
     "NonlinearWorkflow",
     "BaseWorkflow",
-    "BaseStructure"
+    "BaseStructure",
+    "detect_markdown",
 ]
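For context, a minimal sketch of the Conversation pattern that simple_agent.py relies on, using only calls that appear in the diff above (assumes the swarms package is installed):

```python
# Sketch of the Conversation usage pattern from simple_agent.py.
from swarms.structs.conversation import Conversation

conv = Conversation(autosave=False, time_enabled=True)
conv.add("user", "What is 2 + 2?")
conv.add("assistant", "2 + 2 = 4")

# The whole history is flattened into one prompt string for the next LLM call
prompt = conv.return_history_as_string()
conv.display_conversation()
```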
diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py
index ccb346e6..392e83d3 100644
--- a/swarms/structs/conversation.py
+++ b/swarms/structs/conversation.py
@@ -31,7 +31,7 @@ class Conversation(BaseStructure):
         time_enabled: bool = False,
         database: AbstractDatabase = None,
         autosave: bool = True,
-        save_filepath: str = "/runs/conversation.json",
+        save_filepath: str = "runs/conversation.json",
         *args,
         **kwargs,
     ):
diff --git a/swarms/structs/utils.py b/swarms/structs/utils.py
index 3afb5fea..634021e2 100644
--- a/swarms/structs/utils.py
+++ b/swarms/structs/utils.py
@@ -1,3 +1,4 @@
+import re
 import json
 from typing import Dict, Any, List, Optional
 from swarms.structs.agent import Agent
@@ -118,3 +119,21 @@ def extract_tokens_from_text(
         List[str]: The tokens that were found in the text.
     """
     return [token for token in tokens if token in text]
+
+
+def detect_markdown(text: str) -> bool:
+    """
+    Checks if a string contains a Markdown code block fenced by triple backticks.
+
+    Parameters
+    ----------
+    text : str
+        The text to check.
+
+    Returns
+    -------
+    bool
+        True if the text contains a triple-backtick fenced code block, False otherwise.
+    """
+    pattern = r"```[\s\S]*?```"
+    return bool(re.search(pattern, text))
diff --git a/swarms/telemetry/__init__.py b/swarms/telemetry/__init__.py
index 467a3b61..0a16ca28 100644
--- a/swarms/telemetry/__init__.py
+++ b/swarms/telemetry/__init__.py
@@ -1,20 +1,36 @@
 from swarms.telemetry.log_all import log_all_calls, log_calls
-
-# from swarms.telemetry.posthog_utils import log_activity_posthog
+from swarms.telemetry.sys_info import (
+    get_cpu_info,
+    get_oi_version,
+    get_os_version,
+    get_package_mismatches,
+    get_pip_version,
+    get_python_version,
+    get_ram_info,
+    interpreter_info,
+    system_info,
+)
 from swarms.telemetry.user_utils import (
+    generate_unique_identifier,
     generate_user_id,
     get_machine_id,
     get_system_info,
-    generate_unique_identifier,
 )
-

 __all__ = [
     "log_all_calls",
     "log_calls",
-    # "log_activity_posthog",
     "generate_user_id",
     "get_machine_id",
     "get_system_info",
     "generate_unique_identifier",
+    "get_python_version",
+    "get_pip_version",
+    "get_oi_version",
+    "get_os_version",
+    "get_cpu_info",
+    "get_ram_info",
+    "get_package_mismatches",
+    "interpreter_info",
+    "system_info",
 ]
diff --git a/swarms/tools/__init__.py b/swarms/tools/__init__.py
index 65153867..877fb0de 100644
--- a/swarms/tools/__init__.py
+++ b/swarms/tools/__init__.py
@@ -1,3 +1,23 @@
 from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs
+from swarms.tools.code_executor import CodeExecutor
+from swarms.tools.tool_utils import (
+    tool_find_by_name,
+    extract_tool_commands,
+    parse_and_execute_tools,
+    execute_tools,
+)
+from swarms.tools.tool import BaseTool, Tool, StructuredTool, tool

-__all__ = ["scrape_tool_func_docs"]
+
+__all__ = [
+    "scrape_tool_func_docs",
+    "CodeExecutor",
+    "tool_find_by_name",
+    "extract_tool_commands",
+    "parse_and_execute_tools",
+    "execute_tools",
+    "BaseTool",
+    "Tool",
+    "StructuredTool",
+    "tool",
+]
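A quick standalone check of the corrected detect_markdown behavior (the original patch used six backticks on each side of the pattern, which only matches literal runs of six backticks; a Markdown fence is three). This sketch mirrors the fixed function in swarms/structs/utils.py:

```python
# Verifies the triple-backtick fence pattern used by detect_markdown.
import re

def detect_markdown(text: str) -> bool:
    return bool(re.search(r"```[\s\S]*?```", text))

assert detect_markdown("```python\nprint('hi')\n```")
assert not detect_markdown("plain text, no fence")
```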
diff --git a/swarms/tools/code_executor.py b/swarms/tools/code_executor.py
new file mode 100644
index 00000000..3c369dae
--- /dev/null
+++ b/swarms/tools/code_executor.py
@@ -0,0 +1,111 @@
+import os
+import tempfile
+import subprocess
+
+
+class CodeExecutor:
+    """
+    A class for executing code snippets.
+
+    Args:
+        code (str, optional): The code snippet to be executed. Defaults to None.
+
+    Methods:
+        is_python_code(code: str = None) -> bool:
+            Checks if the given code is Python code.
+
+        run_python(code: str = None) -> str:
+            Executes the given Python code and returns the output.
+
+        run(code: str = None) -> str:
+            Executes the given code as a shell command and returns the output.
+
+        __call__() -> str:
+            Executes the code and returns the output.
+    """
+
+    def __init__(self, code: str = None):
+        self.code = code
+
+    def is_python_code(self, code: str = None) -> bool:
+        """
+        Checks if the given code is Python code.
+
+        Args:
+            code (str, optional): The code to be checked. Defaults to None.
+
+        Returns:
+            bool: True if the code is Python code, False otherwise.
+        """
+        code = code or self.code
+        # Heuristic: code extracted from a fenced Markdown block keeps its
+        # language tag, e.g. "python\nprint('hi')".
+        return code.strip().startswith("python")
+
+    def run_python(self, code: str = None) -> str:
+        """
+        Executes the given Python code and returns the output.
+
+        Args:
+            code (str, optional): The Python code to be executed. Defaults to None.
+
+        Returns:
+            str: The output of the code execution.
+        """
+        code = code or self.code
+        try:
+            # Create a temporary file holding the snippet
+            with tempfile.NamedTemporaryFile(
+                suffix=".py", delete=False
+            ) as temp:
+                temp.write(code.encode())
+                temp_filename = temp.name
+
+            # Execute the temporary file with the `python` on PATH
+            output = subprocess.check_output(
+                f"python {temp_filename}",
+                shell=True,
+            )
+
+            # Delete the temporary file
+            os.remove(temp_filename)
+
+            return output.decode("utf-8")
+        except subprocess.CalledProcessError as error:
+            return error.output.decode("utf-8")
+        except Exception as error:
+            return str(error)
+
+    def run(self, code: str = None) -> str:
+        """
+        Executes the given code as a shell command and returns the output.
+
+        Args:
+            code (str, optional): The code to be executed. Defaults to None.
+
+        Returns:
+            str: The output of the code execution.
+        """
+        code = code or self.code
+        try:
+            output = subprocess.check_output(
+                code,
+                shell=True,
+            )
+            return output.decode("utf-8")
+        except subprocess.CalledProcessError as e:
+            return e.output.decode("utf-8")
+        except Exception as e:
+            return str(e)
+
+    def __call__(self) -> str:
+        """
+        Executes the code and returns the output.
+
+        Returns:
+            str: The output of the code execution.
+        """
+        return self.run()
+
+
+# model = CodeExecutor()
+# out = model.run("python3")
+# print(out)
diff --git a/swarms/tools/tool_utils.py b/swarms/tools/tool_utils.py
index c189c9f5..da13e223 100644
--- a/swarms/tools/tool_utils.py
+++ b/swarms/tools/tool_utils.py
@@ -1,8 +1,17 @@
 import re
 import json
+from typing import List, Any


-def extract_tool_commands(self, text: str):
+def tool_find_by_name(tool_name: str, tools: List[Any]):
+    """Find the tool by name"""
+    for tool in tools:
+        if tool.name == tool_name:
+            return tool
+    return None
+
+
+def extract_tool_commands(text: str):
     """
     Extract the tool commands from the text
@@ -39,9 +48,9 @@ def parse_and_execute_tools(response: str):
         execute_tools(tool_name, params)


-def execute_tools(self, tool_name, params):
+def execute_tools(tool_name, params, tools: List[Any] = None):
     """Execute the tool with the provided params"""
-    tool = self.tool_find_by_name(tool_name)
+    # tool_find_by_name requires the list of available tools; default to an
+    # empty list so callers that omit `tools` fail soft instead of raising.
+    tool = tool_find_by_name(tool_name, tools or [])
     if tool:
         # Execute the tool with the provided parameters
         tool_result = tool.run(**params)
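Finally, a hedged end-to-end sketch of the tools surface this patch adds, using only names exported by the new swarms/tools/__init__.py; EchoTool is a hypothetical stand-in for a real tool object with a `name` attribute and a `run` method:

```python
# Usage sketch for CodeExecutor and tool_find_by_name from this patch;
# EchoTool is a hypothetical minimal tool, not part of swarms.
from swarms.tools import CodeExecutor, tool_find_by_name

executor = CodeExecutor()
print(executor.run("echo hello"))           # shell-command path
print(executor.run_python("print(2 + 2)"))  # temp-file Python path

class EchoTool:
    name = "echo"
    def run(self, **params):
        return params

# tool_find_by_name needs the explicit list of available tools
tool = tool_find_by_name("echo", [EchoTool()])
print(tool.run(msg="hi"))
```

Note that CodeExecutor.run executes its argument as a shell command, while run_python writes the snippet to a temporary file and invokes the `python` interpreter on it; callers feeding it model output (as simple_agent.py does) should treat both paths as untrusted-code execution.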