diff --git a/block.py b/block.py
new file mode 100644
index 00000000..c5a76910
--- /dev/null
+++ b/block.py
@@ -0,0 +1,97 @@
+import os
+
+from dotenv import load_dotenv
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# Import the models and structs from swarms
+from swarms import (
+    Gemini,
+    GPT4VisionAPI,
+    Mixtral,
+    OpenAI,
+    ToolAgent,
+    BlocksList,
+)
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API keys from the environment
+openai_api_key = os.getenv("OPENAI_API_KEY")
+gemini_api_key = os.getenv("GEMINI_API_KEY")
+
+# Tool Agent
+model = AutoModelForCausalLM.from_pretrained(
+    "databricks/dolly-v2-12b"
+)
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
+json_schema = {
+    "type": "object",
+    "properties": {
+        "name": {"type": "string"},
+        "age": {"type": "number"},
+        "is_student": {"type": "boolean"},
+        "courses": {"type": "array", "items": {"type": "string"}},
+    },
+}
+toolagent = ToolAgent(
+    model=model, tokenizer=tokenizer, json_schema=json_schema
+)
+
+# BlocksList enables you to build custom swarms by composing classes or functions as blocks
+swarm = BlocksList(
+    "SocialMediaSwarm",
+    "A swarm of social media agents",
+    [
+        OpenAI(openai_api_key=openai_api_key),
+        Mixtral(),
+        GPT4VisionAPI(openai_api_key=openai_api_key),
+        Gemini(gemini_api_key=gemini_api_key),
+    ],
+)
+
+
+# Add the new block to the swarm
+swarm.add(toolagent)
+
+# Remove a block from the swarm
+swarm.remove(toolagent)
+
+# Update a block in the swarm
+swarm.update(toolagent)
+
+# Get a block at a specific index
+block_at_index = swarm.get(0)
+
+# Get all blocks in the swarm
+all_blocks = swarm.get_all()
+
+# Get blocks by name
+openai_blocks = swarm.get_by_name("OpenAI")
+
+# Get blocks by type
+gpt4_blocks = swarm.get_by_type("GPT4VisionAPI")
+
+# Get blocks by ID
+block_by_id = swarm.get_by_id(toolagent.id)
+
+# Get blocks by parent
+blocks_by_parent = swarm.get_by_parent(swarm)
+
+# Get blocks by parent ID
+blocks_by_parent_id = swarm.get_by_parent_id(swarm.id)
+
+# Get blocks by parent name
+blocks_by_parent_name = swarm.get_by_parent_name(swarm.name)
+
+# Get blocks by parent type
+blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__)
+
+# Get blocks by parent description
+blocks_by_parent_description = swarm.get_by_parent_description(
+    swarm.description
+)
+
+# Run a block in the swarm on a task
+inference = swarm.run_block(toolagent, "Hello World")
+print(inference)
diff --git a/requirements.txt b/requirements.txt
index eb39f71a..a2f10869 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,13 +15,13 @@ Pillow==9.4.0
 faiss-cpu==1.7.4
 openai==0.28.0
 attrs==22.2.0
-datasets==2.10.1
+datasets==2.14.5
 pydantic==1.10.12
 bitsandbytes
 soundfile==0.12.1
 arize-phoenix
 weaviate-client==3.25.3
-huggingface-hub==0.16.4
+huggingface-hub==0.19.3
 google-generativeai==0.3.1
 sentencepiece==0.1.98
 requests_mock
diff --git a/scripts/auto_tests_docs/auto_docs.py b/scripts/auto_tests_docs/auto_docs.py
index 8d441ff3..cc3d8feb 100644
--- a/scripts/auto_tests_docs/auto_docs.py
+++ b/scripts/auto_tests_docs/auto_docs.py
@@ -25,6 +25,33 @@ from swarms.structs.schemas import (
 )
 
 ####################
+
+from swarms.structs.agent import Agent
+from swarms.structs.autoscaler import AutoScaler
+from swarms.structs.base import BaseStructure
+from swarms.structs.base_swarm import AbstractSwarm
+from swarms.structs.base_workflow import BaseWorkflow
+from swarms.structs.concurrent_workflow import ConcurrentWorkflow
+from swarms.structs.conversation import Conversation
+from swarms.structs.groupchat import GroupChat, GroupChatManager
+from swarms.structs.model_parallizer import ModelParallelizer
+from swarms.structs.multi_agent_collab import MultiAgentCollaboration
+from swarms.structs.nonlinear_workflow import NonlinearWorkflow
+from swarms.structs.recursive_workflow import RecursiveWorkflow
+from swarms.structs.sequential_workflow import SequentialWorkflow
+from swarms.structs.swarm_net import SwarmNetwork
+from swarms.structs.utils import (
+    distribute_tasks,
+    extract_key_from_json,
+    extract_tokens_from_text,
+    find_agent_by_id,
+    find_token_in_text,
+    parse_tasks,
+)
+
+
+from dotenv import load_dotenv
+
 load_dotenv()
 
 api_key = os.getenv("OPENAI_API_KEY")
@@ -35,19 +62,25 @@ model = OpenAIChat(
 )
 
 
-def process_documentation(cls):
+def process_documentation(
+    item,
+    module: str = "swarms.structs",
+    docs_folder_path: str = "docs/swarms/structs",
+):
     """
-    Process the documentation for a given class using OpenAI model and save it in a Markdown file.
+    Process the documentation for a given class or function using the OpenAI model and save it in a Markdown file.
     """
-    doc = inspect.getdoc(cls)
-    source = inspect.getsource(cls)
+    doc = inspect.getdoc(item)
+    source = inspect.getsource(item)
+    is_class = inspect.isclass(item)
+    item_type = "Class Name" if is_class else "Name"
     input_content = (
-        "Class Name:"
-        f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
+        f"{item_type}:"
+        f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
         f" Code:\n{source}"
     )
 
-    # Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
+    # Process with the OpenAI model
     processed_content = model(
-        DOCUMENTATION_WRITER_SOP(input_content, "swarms.structs")
+        DOCUMENTATION_WRITER_SOP(input_content, module)
     )
@@ -59,8 +92,8 @@ def process_documentation(cls):
 
-    dir_path = "docs/swarms/structs"
+    dir_path = docs_folder_path
     os.makedirs(dir_path, exist_ok=True)
 
     # Write the processed documentation to a Markdown file
-    file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md")
+    file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
     with open(file_path, "w") as file:
         file.write(doc_content)
@@ -82,9 +115,37 @@ def main():
         ArtifactUpload,
         StepInput,
         TaskInput,
+    ]
+
+    items = [
+        Agent,
+        SequentialWorkflow,
+        AutoScaler,
+        Conversation,
+        TaskInput,
+        Artifact,
+        ArtifactUpload,
+        StepInput,
+        SwarmNetwork,
+        ModelParallelizer,
+        MultiAgentCollaboration,
+        AbstractSwarm,
+        GroupChat,
+        GroupChatManager,
+        parse_tasks,
+        find_agent_by_id,
+        distribute_tasks,
+        find_token_in_text,
+        extract_key_from_json,
+        extract_tokens_from_text,
+        ConcurrentWorkflow,
+        RecursiveWorkflow,
+        NonlinearWorkflow,
+        BaseWorkflow,
+        BaseStructure,
     ]
     threads = []
-    for cls in classes:
+    for cls in items:
         thread = threading.Thread(
             target=process_documentation, args=(cls,)
         )
diff --git a/playground/agents/simple_agent.py b/simple_agent.py
similarity index 100%
rename from playground/agents/simple_agent.py
rename to simple_agent.py
diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py
index 0c97e734..55f6d962 100644
--- a/swarms/models/huggingface.py
+++ b/swarms/models/huggingface.py
@@ -189,6 +189,7 @@ class HuggingfaceLLM(AbstractLLM):
             self.model_id, *args, **kwargs
         ).to(self.device)
 
+
     def print_error(self, error: str):
         """Print error"""
         print(colored(f"Error: {error}", "red"))
@@ -263,7 +264,7 @@ class HuggingfaceLLM(AbstractLLM):
                 *args,
                 **kwargs,
             )
-            
+
             return self.tokenizer.decode(
                 outputs[0], skip_special_tokens=True
             )
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index c767ae7b..8ec53aae 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -1,3 +1,4 @@
+"""This module lists all the data structures used in the swarms package."""
 from swarms.structs.agent import Agent
 from swarms.structs.autoscaler import AutoScaler
 from swarms.structs.base import BaseStructure
diff --git a/swarms/structs/blockslist.py b/swarms/structs/blockslist.py
index b2a4db08..8448454c 100644
--- a/swarms/structs/blockslist.py
+++ b/swarms/structs/blockslist.py
@@ -75,8 +75,8 @@ class BlocksList(BaseStructure):
 
     def get_all(self):
         return self.blocks
-    
-    def run_block(self, block: Any, task: str, *args, **kwargs): 
+
+    def run_block(self, block: Any, task: str, *args, **kwargs):
         """Run the block for the specified task.
 
         Args:
diff --git a/swarms/telemetry/__init__.py b/swarms/telemetry/__init__.py
index 0a16ca28..2e5b709a 100644
--- a/swarms/telemetry/__init__.py
+++ b/swarms/telemetry/__init__.py
@@ -1,3 +1,4 @@
""" from swarms.telemetry.log_all import log_all_calls, log_calls from swarms.telemetry.sys_info import ( get_cpu_info, @@ -10,7 +11,19 @@ from swarms.telemetry.sys_info import ( interpreter_info, system_info, ) +from swarms.telemetry.sys_info import ( + get_cpu_info, + get_oi_version, + get_os_version, + get_package_mismatches, + get_pip_version, + get_python_version, + get_ram_info, + interpreter_info, + system_info, +) from swarms.telemetry.user_utils import ( + generate_unique_identifier, generate_unique_identifier, generate_user_id, get_machine_id, @@ -24,7 +37,11 @@ __all__ = [ "get_machine_id", "get_system_info", "generate_unique_identifier", +<<<<<<< HEAD + "get_python_version", # from swarms/telemetry/sys_info.py +======= "get_python_version", +>>>>>>> 1df42a3 ([BUGFIX][Conversation] [swarm.tools]) "get_pip_version", "get_oi_version", "get_os_version", diff --git a/swarms/telemetry/sys_info.py b/swarms/telemetry/sys_info.py index 08ad1db3..4939fc8a 100644 --- a/swarms/telemetry/sys_info.py +++ b/swarms/telemetry/sys_info.py @@ -1,158 +1,158 @@ -import platform -import subprocess - -import pkg_resources -import psutil -import toml - - -def get_python_version(): - return platform.python_version() - - -def get_pip_version(): - try: - pip_version = ( - subprocess.check_output(["pip", "--version"]) - .decode() - .split()[1] - ) - except Exception as e: - pip_version = str(e) - return pip_version - - -def get_oi_version(): - try: - oi_version_cmd = ( - subprocess.check_output(["interpreter", "--version"]) - .decode() - .split()[1] - ) - except Exception as e: - oi_version_cmd = str(e) - oi_version_pkg = pkg_resources.get_distribution( - "open-interpreter" - ).version - oi_version = oi_version_cmd, oi_version_pkg - return oi_version - - -def get_os_version(): - return platform.platform() - - -def get_cpu_info(): - return platform.processor() - - -def get_ram_info(): - vm = psutil.virtual_memory() - used_ram_gb = vm.used / (1024**3) - free_ram_gb = vm.free / (1024**3) - total_ram_gb = vm.total / (1024**3) - return ( - f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:" - f" {free_ram_gb:.2f}" - ) - - -def get_package_mismatches(file_path="pyproject.toml"): - with open(file_path, "r") as file: - pyproject = toml.load(file) - dependencies = pyproject["tool"]["poetry"]["dependencies"] - dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][ - "dependencies" - ] - dependencies.update(dev_dependencies) - - installed_packages = { - pkg.key: pkg.version for pkg in pkg_resources.working_set - } - - mismatches = [] - for package, version_info in dependencies.items(): - if isinstance(version_info, dict): - version_info = version_info["version"] - installed_version = installed_packages.get(package) - if installed_version and version_info.startswith("^"): - expected_version = version_info[1:] - if not installed_version.startswith(expected_version): - mismatches.append( - f"\t {package}: Mismatch," - f" pyproject.toml={expected_version}," - f" pip={installed_version}" - ) - else: - mismatches.append(f"\t {package}: Not found in pip list") - - return "\n" + "\n".join(mismatches) - - -def interpreter_info(interpreter): - try: - if interpreter.offline and interpreter.llm.api_base: - try: - curl = subprocess.check_output( - f"curl {interpreter.llm.api_base}" - ) - except Exception as e: - curl = str(e) - else: - curl = "Not local" - - messages_to_display = [] - for message in interpreter.messages: - message = message.copy() - try: - if len(message["content"]) > 600: - message["content"] = ( 
- message["content"][:300] - + "..." - + message["content"][-300:] - ) - except Exception as e: - print(str(e), "for message:", message) - messages_to_display.append(message) - - return f""" - - # Interpreter Info +# import platform +# import subprocess + +# import pkg_resources +# import psutil +# import toml + + +# def get_python_version(): +# return platform.python_version() + + +# def get_pip_version(): +# try: +# pip_version = ( +# subprocess.check_output(["pip", "--version"]) +# .decode() +# .split()[1] +# ) +# except Exception as e: +# pip_version = str(e) +# return pip_version + + +# def get_oi_version(): +# try: +# oi_version_cmd = ( +# subprocess.check_output(["interpreter", "--version"]) +# .decode() +# .split()[1] +# ) +# except Exception as e: +# oi_version_cmd = str(e) +# oi_version_pkg = pkg_resources.get_distribution( +# "open-interpreter" +# ).version +# oi_version = oi_version_cmd, oi_version_pkg +# return oi_version + + +# def get_os_version(): +# return platform.platform() + + +# def get_cpu_info(): +# return platform.processor() + + +# def get_ram_info(): +# vm = psutil.virtual_memory() +# used_ram_gb = vm.used / (1024**3) +# free_ram_gb = vm.free / (1024**3) +# total_ram_gb = vm.total / (1024**3) +# return ( +# f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:" +# f" {free_ram_gb:.2f}" +# ) + + +# def get_package_mismatches(file_path="pyproject.toml"): +# with open(file_path, "r") as file: +# pyproject = toml.load(file) +# dependencies = pyproject["tool"]["poetry"]["dependencies"] +# dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][ +# "dependencies" +# ] +# dependencies.update(dev_dependencies) + +# installed_packages = { +# pkg.key: pkg.version for pkg in pkg_resources.working_set +# } + +# mismatches = [] +# for package, version_info in dependencies.items(): +# if isinstance(version_info, dict): +# version_info = version_info["version"] +# installed_version = installed_packages.get(package) +# if installed_version and version_info.startswith("^"): +# expected_version = version_info[1:] +# if not installed_version.startswith(expected_version): +# mismatches.append( +# f"\t {package}: Mismatch," +# f" pyproject.toml={expected_version}," +# f" pip={installed_version}" +# ) +# else: +# mismatches.append(f"\t {package}: Not found in pip list") + +# return "\n" + "\n".join(mismatches) + + +# def interpreter_info(interpreter): +# try: +# if interpreter.offline and interpreter.llm.api_base: +# try: +# curl = subprocess.check_output( +# f"curl {interpreter.llm.api_base}" +# ) +# except Exception as e: +# curl = str(e) +# else: +# curl = "Not local" + +# messages_to_display = [] +# for message in interpreter.messages: +# message = message.copy() +# try: +# if len(message["content"]) > 600: +# message["content"] = ( +# message["content"][:300] +# + "..." 
+# + message["content"][-300:] +# ) +# except KeyError as e: +# print(f"KeyError {str(e)} for message: {message}") +# messages_to_display.append(message) + +# return f""" + +# # Interpreter Info - Vision: {interpreter.llm.supports_vision} - Model: {interpreter.llm.model} - Function calling: {interpreter.llm.supports_functions} - Context window: {interpreter.llm.context_window} - Max tokens: {interpreter.llm.max_tokens} - - Auto run: {interpreter.auto_run} - API base: {interpreter.llm.api_base} - Offline: {interpreter.offline} - - Curl output: {curl} - - # Messages - - System Message: {interpreter.system_message} - - """ + "\n\n".join([str(m) for m in messages_to_display]) - except: - return "Error, couldn't get interpreter info" - - -def system_info(interpreter): - oi_version = get_oi_version() - print(f""" - Python Version: {get_python_version()} - Pip Version: {get_pip_version()} - Open-interpreter Version: cmd:{oi_version[0]}, pkg: {oi_version[1]} - OS Version and Architecture: {get_os_version()} - CPU Info: {get_cpu_info()} - RAM Info: {get_ram_info()} - {interpreter_info(interpreter)} - """) - - # Removed the following, as it causes `FileNotFoundError: [Errno 2] No such file or directory: 'pyproject.toml'`` on prod - # (i think it works on dev, but on prod the pyproject.toml will not be in the cwd. might not be accessible at all) - # Package Version Mismatches: - # {get_package_mismatches()} +# Vision: {interpreter.llm.supports_vision} +# Model: {interpreter.llm.model} +# Function calling: {interpreter.llm.supports_functions} +# Context window: {interpreter.llm.context_window} +# Max tokens: {interpreter.llm.max_tokens} + +# Auto run: {interpreter.auto_run} +# API base: {interpreter.llm.api_base} +# Offline: {interpreter.offline} + +# Curl output: {curl} + +# # Messages + +# System Message: {interpreter.system_message} + +# """ + "\n\n".join([str(m) for m in messages_to_display]) +# except AttributeError as e: +# return f"Error, couldn't get interpreter info: {str(e)}" + + +# def system_info(interpreter): +# oi_version = get_oi_version() +# print(f""" +# Python Version: {get_python_version()} +# Pip Version: {get_pip_version()} +# Open-interpreter Version: cmd:{oi_version[0]}, pkg: {oi_version[1]} +# OS Version and Architecture: {get_os_version()} +# CPU Info: {get_cpu_info()} +# RAM Info: {get_ram_info()} +# {interpreter_info(interpreter)} +# """) + +# # Removed the following, as it causes `FileNotFoundError: [Errno 2] No such file or directory: 'pyproject.toml'`` on prod +# # (i think it works on dev, but on prod the pyproject.toml will not be in the cwd. 
might not be accessible at all) +# # Package Version Mismatches: +# # {get_package_mismatches()} diff --git a/swarms/tools/logits_processor.py b/swarms/tools/logits_processor.py index ed7fef18..c6ba1691 100644 --- a/swarms/tools/logits_processor.py +++ b/swarms/tools/logits_processor.py @@ -1,3 +1,5 @@ +"""Logits processors for the GPT-Neo model.""" + from transformers import ( PreTrainedTokenizer, LogitsWarper, @@ -48,7 +50,7 @@ class NumberStoppingCriteria(StoppingCriteria): scores: torch.FloatTensor, ) -> bool: decoded = self.tokenizer.decode( - input_ids[0][self.prompt_length :], + input_ids[0][self.prompt_length:], skip_special_tokens=True, ) diff --git a/swarms/tools/tool.py b/swarms/tools/tool.py index 1029a183..38f01057 100644 --- a/swarms/tools/tool.py +++ b/swarms/tools/tool.py @@ -1,4 +1,6 @@ """Base implementation for tools or skills.""" +# flake8: noqa E501 + from __future__ import annotations import asyncio diff --git a/swarms/tools/tool_func_doc_scraper.py b/swarms/tools/tool_func_doc_scraper.py index d233bfae..8ec05ed9 100644 --- a/swarms/tools/tool_func_doc_scraper.py +++ b/swarms/tools/tool_func_doc_scraper.py @@ -1,3 +1,6 @@ +""" This module contains a function that scrapes the docstrings and parameters of a function decorated with `tool` and returns a formatted string. """ +# flake8: noqa E501 + import inspect from typing import Callable from termcolor import colored diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py index c1479507..e7e22f2f 100644 --- a/swarms/utils/__init__.py +++ b/swarms/utils/__init__.py @@ -12,7 +12,7 @@ from swarms.utils.prep_torch_model_inference import ( prep_torch_inference, ) from swarms.utils.token_count_tiktoken import limit_tokens_from_string - +from swarms.utils.try_except_wrapper import try_except_wrapper __all__ = [ "SubprocessCodeInterpreter", diff --git a/swarms/utils/apa.py b/swarms/utils/apa.py index fa73b7b4..7ae61a79 100644 --- a/swarms/utils/apa.py +++ b/swarms/utils/apa.py @@ -1,7 +1,11 @@ +""" This module contains the data structure of the APA framework. """ +# flake8: noqa W291 + from enum import Enum, unique, auto import abc from typing import List, Optional import json +from json.decoder import JSONDecodeError from dataclasses import dataclass, field @@ -108,7 +112,8 @@ class Action: def to_json(self): try: tool_output = json.loads(self.tool_output) - except: + except JSONDecodeError: + # print("Failed to decode JSON. Using raw output instead.") tool_output = self.tool_output return { "thought": self.thought, diff --git a/swarms/utils/code_interpreter.py b/swarms/utils/code_interpreter.py index 9e27b668..a59244a0 100644 --- a/swarms/utils/code_interpreter.py +++ b/swarms/utils/code_interpreter.py @@ -1,3 +1,6 @@ +""" This module contains the SubprocessCodeInterpreter class, which is a base class for code interpreters that run code in a subprocess. """ +# flake8: noqa E501 + import subprocess import threading import queue diff --git a/swarms/utils/execute_futures.py b/swarms/utils/execute_futures.py index bc2d47ef..3945911d 100644 --- a/swarms/utils/execute_futures.py +++ b/swarms/utils/execute_futures.py @@ -1,3 +1,6 @@ +"""Execute a dictionary of futures and return the results. 
""" +# flake8: noqa E501 + from concurrent import futures from concurrent.futures import Future from typing import TypeVar, Dict diff --git a/swarms/utils/find_img_path.py b/swarms/utils/find_img_path.py index 2ca5d082..5e0d6166 100644 --- a/swarms/utils/find_img_path.py +++ b/swarms/utils/find_img_path.py @@ -1,3 +1,6 @@ +""" Find the image path from the text """ +# flake8: noqa E501 + import os import re diff --git a/swarms/utils/load_model_torch.py b/swarms/utils/load_model_torch.py index 53649e93..113ff966 100644 --- a/swarms/utils/load_model_torch.py +++ b/swarms/utils/load_model_torch.py @@ -1,3 +1,6 @@ +""" Load a PyTorch model from a given path and move it to the specified device. """ +# flake8: noqa E501 + import torch from torch import nn diff --git a/swarms/utils/loggers.py b/swarms/utils/loggers.py index 68477132..a0330787 100644 --- a/swarms/utils/loggers.py +++ b/swarms/utils/loggers.py @@ -1,4 +1,6 @@ """Logging modules""" +# flake8: noqa E501 + import logging import os import random diff --git a/swarms/utils/main.py b/swarms/utils/main.py index b94fae11..5bad642a 100644 --- a/swarms/utils/main.py +++ b/swarms/utils/main.py @@ -1,3 +1,8 @@ +""" + This file contains all the utility functions and classes used in the project. + It also contains the code for uploading files to S3 and static folder. +""" +# flake8: noqa E501 import os import random import shutil @@ -390,7 +395,7 @@ class FileHandler: "SERVER", "http://localhost:8000" ) ) - + 1 : + + 1: ] local_filename = ( Path("file") / local_filepath.split("/")[-1] diff --git a/swarms/utils/markdown_message.py b/swarms/utils/markdown_message.py index 57cd285f..83cf27a5 100644 --- a/swarms/utils/markdown_message.py +++ b/swarms/utils/markdown_message.py @@ -1,3 +1,5 @@ +""" Markdown message display. """ +# flake8: noqa E501 from rich.console import Console from rich.markdown import Markdown from rich.rule import Rule diff --git a/swarms/utils/parse_code.py b/swarms/utils/parse_code.py index 838d5868..52742e79 100644 --- a/swarms/utils/parse_code.py +++ b/swarms/utils/parse_code.py @@ -1,3 +1,5 @@ +""" This module contains functions for parsing code from Markdown files. """ +# flake8: noqa E501 import re diff --git a/swarms/utils/serializable.py b/swarms/utils/serializable.py index de9444ef..3d7e7c81 100644 --- a/swarms/utils/serializable.py +++ b/swarms/utils/serializable.py @@ -1,3 +1,5 @@ +""" Serializable base class. """ +# flake8: noqa E501 from abc import ABC from typing import Any, Dict, List, Literal, TypedDict, Union, cast diff --git a/swarms/workers/base.py b/swarms/workers/base.py index 358810bd..d5b76815 100644 --- a/swarms/workers/base.py +++ b/swarms/workers/base.py @@ -1,3 +1,5 @@ +""" (In preview) An abstract class for AI worker. 
""" +# flake8: noqa E501 from typing import Dict, List, Optional, Union diff --git a/tests/telemetry/test_sys_info.py b/tests/telemetry/test_sys_info.py new file mode 100644 index 00000000..fa88d577 --- /dev/null +++ b/tests/telemetry/test_sys_info.py @@ -0,0 +1,37 @@ +"""Tests for the sys_info module.""" + +# import pytest +# from unittest.mock import Mock +# from sys_info import interpreter_info, system_info + +# def test_interpreter_info(mocker): +# """Test interpreter_info.""" +# mocker.patch('subprocess.check_output', return_value='curl output') +# interpreter = Mock() +# interpreter.offline = True +# interpreter.llm.api_base = 'http://api_base' +# interpreter.llm.supports_vision = True +# interpreter.llm.model = 'model' +# interpreter.llm.supports_functions = True +# interpreter.llm.context_window = 'context_window' +# interpreter.llm.max_tokens = 100 +# interpreter.auto_run = True +# interpreter.llm.api_base = 'http://api_base' +# interpreter.offline = True +# interpreter.system_message = 'system_message' +# interpreter.messages = [{'content': 'message_content'}] +# result = interpreter_info(interpreter) +# assert 'curl output' in result + +# def test_system_info(mocker): +# """Test system_info.""" +# mocker.patch('your_module.get_oi_version', return_value=('cmd_version', 'pkg_version')) # replace with your actual module name +# mocker.patch('your_module.get_python_version', return_value='python_version') # replace with your actual module name +# mocker.patch('your_module.get_pip_version', return_value='pip_version') # replace with your actual module name +# mocker.patch('your_module.get_os_version', return_value='os_version') # replace with your actual module name +# mocker.patch('your_module.get_cpu_info', return_value='cpu_info') # replace with your actual module name +# mocker.patch('your_module.get_ram_info', return_value='ram_info') # replace with your actual module name +# mocker.patch('your_module.interpreter_info', return_value='interpreter_info') # replace with your actual module name +# interpreter = Mock() +# result = system_info(interpreter) +# assert 'interpreter_info' in result \ No newline at end of file