[BUGFIX][Conversation] [swarm.tools]

pull/343/head
Kye 1 year ago
parent 3327e463c6
commit 1df42a3991

@@ -2,27 +2,39 @@
import inspect
import os
import threading
from zeta import OpenAIChat
from swarms import OpenAIChat
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from zeta.nn.modules._activations import (
AccurateGELUActivation,
ClippedGELUActivation,
FastGELUActivation,
GELUActivation,
LaplaceActivation,
LinearActivation,
MishActivation,
NewGELUActivation,
PytorchGELUTanh,
QuickGELUActivation,
ReLUSquaredActivation,
from swarms.structs.agent import Agent
from swarms.structs.autoscaler import AutoScaler
from swarms.structs.base import BaseStructure
from swarms.structs.base_swarm import AbstractSwarm
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.conversation import Conversation
from swarms.structs.groupchat import GroupChat, GroupChatManager
from swarms.structs.model_parallizer import ModelParallelizer
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
from swarms.structs.nonlinear_workflow import NonlinearWorkflow
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.schemas import (
Artifact,
ArtifactUpload,
StepInput,
TaskInput,
)
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.utils import (
distribute_tasks,
extract_key_from_json,
extract_tokens_from_text,
find_agent_by_id,
find_token_in_text,
parse_tasks,
)
from zeta.nn.modules.dense_connect import DenseBlock
from zeta.nn.modules.dual_path_block import DualPathBlock
from zeta.nn.modules.feedback_block import FeedbackBlock
from zeta.nn.modules.highway_layer import HighwayLayer
from zeta.nn.modules.multi_scale_block import MultiScaleBlock
from zeta.nn.modules.recursive_block import RecursiveBlock
from dotenv import load_dotenv
load_dotenv()
@@ -36,59 +48,76 @@ model = OpenAIChat(
)
def process_documentation(cls):
def process_documentation(
item,
module: str = "swarms.structs",
docs_folder_path: str = "docs/swarms/structs",
):
"""
Process the documentation for a given class using OpenAI model and save it in a Markdown file.
Process the documentation for a given class or function using the OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(cls)
source = inspect.getsource(cls)
doc = inspect.getdoc(item)
source = inspect.getsource(item)
is_class = inspect.isclass(item)
item_type = "Class Name" if is_class else "Name"
input_content = (
"Class Name:"
f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f"{item_type}:"
f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
print(input_content)
# Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
# Process with OpenAI model
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, "zeta")
DOCUMENTATION_WRITER_SOP(input_content, module)
)
doc_content = f"# {cls.__name__}\n\n{processed_content}\n"
doc_content = f"# {item.__name__}\n\n{processed_content}\n"
# Create the directory if it doesn't exist
dir_path = "docs/zeta/nn/modules"
dir_path = docs_folder_path
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md")
# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)
print(
f"Processed documentation for {item.__name__}. at {file_path}"
)
def main():
classes = [
DenseBlock,
HighwayLayer,
MultiScaleBlock,
FeedbackBlock,
DualPathBlock,
RecursiveBlock,
PytorchGELUTanh,
NewGELUActivation,
GELUActivation,
FastGELUActivation,
QuickGELUActivation,
ClippedGELUActivation,
AccurateGELUActivation,
MishActivation,
LinearActivation,
LaplaceActivation,
ReLUSquaredActivation,
items = [
Agent,
SequentialWorkflow,
AutoScaler,
Conversation,
TaskInput,
Artifact,
ArtifactUpload,
StepInput,
SwarmNetwork,
ModelParallelizer,
MultiAgentCollaboration,
AbstractSwarm,
GroupChat,
GroupChatManager,
parse_tasks,
find_agent_by_id,
distribute_tasks,
find_token_in_text,
extract_key_from_json,
extract_tokens_from_text,
ConcurrentWorkflow,
RecursiveWorkflow,
NonlinearWorkflow,
BaseWorkflow,
BaseStructure,
]
threads = []
for cls in classes:
for cls in items:
thread = threading.Thread(
target=process_documentation, args=(cls,)
)
@@ -100,7 +129,7 @@ def main():
thread.join()
print(
"Documentation generated in 'docs/zeta/nn/modules' directory."
"Documentation generated in 'docs/swarms/structs' directory."
)

@@ -0,0 +1,133 @@
import inspect
import os
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarms import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.autoscaler import AutoScaler
from swarms.structs.base import BaseStructure
from swarms.structs.base_swarm import AbstractSwarm
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.conversation import Conversation
from swarms.structs.groupchat import GroupChat, GroupChatManager
from swarms.structs.model_parallizer import ModelParallelizer
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
from swarms.structs.nonlinear_workflow import NonlinearWorkflow
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.schemas import (
Artifact,
ArtifactUpload,
StepInput,
TaskInput,
)
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.utils import (
distribute_tasks,
extract_key_from_json,
extract_tokens_from_text,
find_agent_by_id,
find_token_in_text,
parse_tasks,
)
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
model_name="gpt-4-1106-preview",
openai_api_key=api_key,
max_tokens=4000,
)
def process_documentation(
item,
module: str = "swarms.structs",
docs_folder_path: str = "docs/swarms/structs",
):
"""
Process the documentation for a given class or function using the OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(item)
source = inspect.getsource(item)
is_class = inspect.isclass(item)
item_type = "Class Name" if is_class else "Name"
input_content = (
f"{item_type}:"
f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
# Process with OpenAI model
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, module)
)
doc_content = f"# {item.__name__}\n\n{processed_content}\n"
# Create the directory if it doesn't exist
dir_path = docs_folder_path
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)
print(
f"Processed documentation for {item.__name__}. at {file_path}"
)
def main(module: str = "docs/swarms/structs"):
items = [
Agent,
SequentialWorkflow,
AutoScaler,
Conversation,
TaskInput,
Artifact,
ArtifactUpload,
StepInput,
SwarmNetwork,
ModelParallelizer,
MultiAgentCollaboration,
AbstractSwarm,
GroupChat,
GroupChatManager,
parse_tasks,
find_agent_by_id,
distribute_tasks,
find_token_in_text,
extract_key_from_json,
extract_tokens_from_text,
ConcurrentWorkflow,
RecursiveWorkflow,
NonlinearWorkflow,
BaseWorkflow,
BaseStructure,
]
threads = []
for item in items:
thread = threading.Thread(
target=process_documentation, args=(item,)
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
print(f"Documentation generated in {module} directory.")
if __name__ == "__main__":
main()
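Note: with the new signature, a single struct can also be documented by calling process_documentation directly (for example, from the bottom of this script in place of main()); a minimal sketch, assuming OPENAI_API_KEY is set in the environment:

# Hedged sketch: document just the Conversation struct into the new docs folder.
process_documentation(
    Conversation,
    module="swarms.structs",
    docs_folder_path="docs/swarms/structs",
)
# Expected side effect: docs/swarms/structs/conversation.md is written.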

@@ -5,9 +5,14 @@ from dotenv import load_dotenv
from swarms import (
OpenAIChat,
Conversation,
detect_markdown,
extract_code_from_markdown,
)
from swarms.tools.code_executor import CodeExecutor
conv = Conversation(
autosave=False,
time_enabled=True,
)
@@ -18,7 +23,7 @@ load_dotenv()
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
llm = OpenAIChat(openai_api_key=api_key)
# Run the language model in a loop
@@ -27,18 +32,33 @@ def interactive_conversation(llm, iters: int = 10):
for i in range(iters):
user_input = input("User: ")
conv.add("user", user_input)
if user_input.lower() == "quit":
break
task = (
conv.return_history_as_string()
) # Get the conversation history
# Run the language model
out = llm(task)
conv.add("assistant", out)
print(
f"Assistant: {out}",
)
# Code Interpreter
if detect_markdown(out):
code = extract_code_from_markdown(out)
if code:
print(f"Code: {code}")
executor = CodeExecutor()
out = executor.run(code)
conv.add("assistant", out)
# print(f"Assistant: {out}")
conv.display_conversation()
conv.export_conversation("conversation.txt")
# conv.export_conversation("conversation.txt")
# Replace with your LLM instance

@@ -189,7 +189,9 @@ class BaseOpenAI(BaseLLM):
return True
client: Any = None #: :meta private:
model_name: str = Field(default="text-davinci-003", alias="model")
model_name: str = Field(
default="gpt-4-1106-preview", alias="model"
)
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""

@@ -25,6 +25,7 @@ from swarms.structs.utils import (
find_agent_by_id,
find_token_in_text,
parse_tasks,
detect_markdown,
)
__all__ = [
@@ -52,5 +53,6 @@ __all__ = [
"RecursiveWorkflow",
"NonlinearWorkflow",
"BaseWorkflow",
"BaseStructure"
"BaseStructure",
"detect_markdown",
]

@@ -31,7 +31,7 @@ class Conversation(BaseStructure):
time_enabled: bool = False,
database: AbstractDatabase = None,
autosave: bool = True,
save_filepath: str = "/runs/conversation.json",
save_filepath: str = "runs/conversation.json",
*args,
**kwargs,
):
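Note: dropping the leading slash makes the default autosave target relative to the working directory instead of the filesystem root; a minimal sketch of the corrected behaviour, assuming autosave is left at its default:

# Hedged sketch: with the fixed default path, autosaved history lands under ./runs/.
from swarms.structs.conversation import Conversation

conv = Conversation(time_enabled=True)  # autosave=True, save_filepath="runs/conversation.json"
conv.add("user", "Hello")
# Expected: when the structure autosaves, the JSON is written to runs/conversation.json
# relative to the current directory, not to /runs/ at the filesystem root.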

@@ -1,3 +1,4 @@
import re
import json
from typing import Dict, Any, List, Optional
from swarms.structs.agent import Agent
@@ -118,3 +119,21 @@ def extract_tokens_from_text(
List[str]: The tokens that were found in the text.
"""
return [token for token in tokens if token in text]
def detect_markdown(text: str) -> bool:
"""
Checks if a string contains a Markdown code block enclosed in triple backticks.
Parameters
----------
text : str
The text to check.
Returns
-------
bool
True if the text contains a Markdown code block enclosed in triple backticks, False otherwise.
"""
pattern = r"```[\s\S]*?```"
return bool(re.search(pattern, text))
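A short, hedged usage sketch of the new helper, mirroring the gate used in the conversation playground above (extract_code_from_markdown is the existing swarms helper imported there):

from swarms import extract_code_from_markdown

reply = "Here is the fix:\n```python\nprint('hi')\n```"
if detect_markdown(reply):  # True: the reply contains a fenced code block
    code = extract_code_from_markdown(reply)
    print(code)  # expected to contain the fenced code body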

@@ -1,20 +1,36 @@
from swarms.telemetry.log_all import log_all_calls, log_calls
# from swarms.telemetry.posthog_utils import log_activity_posthog
from swarms.telemetry.sys_info import (
get_cpu_info,
get_oi_version,
get_os_version,
get_package_mismatches,
get_pip_version,
get_python_version,
get_ram_info,
interpreter_info,
system_info,
)
from swarms.telemetry.user_utils import (
generate_unique_identifier,
generate_user_id,
get_machine_id,
get_system_info,
generate_unique_identifier,
)
__all__ = [
"log_all_calls",
"log_calls",
# "log_activity_posthog",
"generate_user_id",
"get_machine_id",
"get_system_info",
"generate_unique_identifier",
"get_python_version",
"get_pip_version",
"get_oi_version",
"get_os_version",
"get_cpu_info",
"get_ram_info",
"get_package_mismatches",
"interpreter_info",
"system_info",
]

@@ -1,3 +1,23 @@
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs
from swarms.tools.code_executor import CodeExecutor
from swarms.tools.tool_utils import (
tool_find_by_name,
extract_tool_commands,
parse_and_execute_tools,
execute_tools,
)
from swarms.tools.tool import BaseTool, Tool, StructuredTool, tool
__all__ = ["scrape_tool_func_docs"]
__all__ = [
"scrape_tool_func_docs",
"CodeExecutor",
"tool_find_by_name",
"extract_tool_commands",
"parse_and_execute_tools",
"execute_tools",
"BaseTool",
"Tool",
"StructuredTool",
"tool",
]
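With the widened __all__, the helpers resolve straight from the package root; a minimal import sketch, with names taken verbatim from the list above:

# Hedged smoke test of the expanded swarms.tools surface.
from swarms.tools import (
    CodeExecutor,
    tool_find_by_name,
    extract_tool_commands,
    scrape_tool_func_docs,
)

print(CodeExecutor.__name__, tool_find_by_name.__name__)  # the symbols resolve at package level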

@@ -0,0 +1,111 @@
import os
import tempfile
import subprocess
class CodeExecutor:
"""
A class for executing code snippets.
Args:
code (str, optional): The code snippet to be executed. Defaults to None.
Methods:
is_python_code(code: str = None) -> bool:
Checks if the given code is Python code.
run_python(code: str = None) -> str:
Executes the given Python code and returns the output.
run(code: str = None) -> str:
Executes the given code and returns the output.
__call__() -> str:
Executes the code and returns the output.
"""
def __init__(self, code: str = None):
self.code = code
def is_python_code(self, code: str = None) -> bool:
"""
Checks if the given code is Python code.
Args:
code (str, optional): The code to be checked. Defaults to None.
Returns:
bool: True if the code is Python code, False otherwise.
"""
code = code or self.code
return code.strip().startswith("python")
def run_python(self, code: str = None) -> str:
"""
Executes the given Python code and returns the output.
Args:
code (str, optional): The Python code to be executed. Defaults to None.
Returns:
str: The output of the code execution.
"""
code = code or self.code
try:
# Create a temporary file
with tempfile.NamedTemporaryFile(
suffix=".py", delete=False
) as temp:
temp.write(code.encode())
temp_filename = temp.name
# Execute the temporary file
output = subprocess.check_output(
f"python {temp_filename}",
shell=True,
)
# Delete the temporary file
os.remove(temp_filename)
return output.decode("utf-8")
except subprocess.CalledProcessError as error:
return error.output.decode("utf-8")
except Exception as error:
return str(error)
def run(self, code: str = None) -> str:
"""
Executes the given code and returns the output.
Args:
code (str, optional): The code to be executed. Defaults to None.
Returns:
str: The output of the code execution.
"""
code = code or self.code
try:
output = subprocess.check_output(
code,
shell=True,
)
return output.decode("utf-8")
except subprocess.CalledProcessError as e:
return e.output.decode("utf-8")
except Exception as e:
return str(e)
def __call__(self) -> str:
"""
Executes the code and returns the output.
Returns:
str: The output of the code execution.
"""
return self.run()
# model = CodeExecutor()
# out = model.run("python3")
# print(out)
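The commented-out snippet above hints at usage; a slightly fuller hedged sketch, based only on the methods defined in this file:

# run() pipes the snippet to the shell via subprocess.check_output, so a shell
# command is the simplest smoke test.
executor = CodeExecutor()
print(executor.run("echo hello"))  # expected: "hello\n"

# run_python() writes the snippet to a temporary .py file and invokes `python` on it.
print(executor.run_python("print(2 + 2)"))  # expected: "4\n"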

@@ -1,8 +1,17 @@
import re
import json
from typing import List, Any
def extract_tool_commands(self, text: str):
def tool_find_by_name(tool_name: str, tools: List[Any]):
"""Find the tool by name"""
for tool in tools:
if tool.name == tool_name:
return tool
return None
def extract_tool_commands(text: str):
"""
Extract the tool commands from the text
@@ -39,9 +48,9 @@ def parse_and_execute_tools(response: str):
execute_tools(tool_name, params)
def execute_tools(self, tool_name, params):
def execute_tools(tool_name, params):
"""Execute the tool with the provided params"""
tool = self.tool_find_by_name(tool_name)
tool = tool_find_by_name(tool_name)
if tool:
# Execute the tool with the provided parameters
tool_result = tool.run(**params)
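A hedged sketch of the now module-level lookup, using a hypothetical stand-in tool object (only the .name attribute, plus .run for the execute path, is assumed; real tools come from swarms.tools.tool):

from dataclasses import dataclass

@dataclass
class DummyTool:
    # Hypothetical stand-in; any object with a .name attribute would do.
    name: str

    def run(self, **params):
        return params

tools = [DummyTool("search"), DummyTool("calculator")]
match = tool_find_by_name("calculator", tools)
print(match.name if match else "not found")  # expected: calculator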
