From 33e0f9f11055c0b7578410aa9c867f715e08a16b Mon Sep 17 00:00:00 2001 From: vyomakesh09 Date: Fri, 29 Dec 2023 00:16:38 +0000 Subject: [PATCH] new file: output.txt modified: pyproject.toml modified: requirements.txt modified: swarms/models/kosmos_two.py deleted: swarms/models/multion.py modified: swarms/utils/code_interpreter.py new file: tests/__init__.py deleted: tests/models/test_multion.py modified: tests/models/test_ssd_1b.py modified: tests/utils/test_class_args_wrapper.py modified: tests/utils/test_subprocess_code_interpreter.py --- output.txt | 397 ++++++++++++++++++ pyproject.toml | 1 + requirements.txt | 1 + swarms/models/kosmos_two.py | 5 +- swarms/models/multion.py | 60 --- swarms/utils/code_interpreter.py | 17 +- tests/__init__.py | 0 tests/models/test_multion.py | 54 --- tests/models/test_ssd_1b.py | 69 --- tests/utils/test_class_args_wrapper.py | 5 +- .../utils/test_subprocess_code_interpreter.py | 335 +++------------ 11 files changed, 458 insertions(+), 486 deletions(-) create mode 100644 output.txt delete mode 100644 swarms/models/multion.py create mode 100644 tests/__init__.py delete mode 100644 tests/models/test_multion.py diff --git a/output.txt b/output.txt new file mode 100644 index 00000000..4c385f2b --- /dev/null +++ b/output.txt @@ -0,0 +1,397 @@ +============================= test session starts ============================== +platform linux -- Python 3.10.12, pytest-7.4.2, pluggy-1.3.0 +benchmark: 4.0.0 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000) +rootdir: /home/v/vswarms +plugins: benchmark-4.0.0, anyio-3.7.1 +collected 812 items / 11 errors + +==================================== ERRORS ==================================== +_________________ ERROR collecting tests/memory/test_pq_db.py __________________ +ImportError while importing test module '/home/v/vswarms/tests/memory/test_pq_db.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/memory/test_pq_db.py:6: in + from swarms.memory.pg import PostgresDB +../.local/lib/python3.10/site-packages/swarms/memory/pg.py:6: in + from swarms.memory.base import BaseVectorStore +E ModuleNotFoundError: No module named 'swarms.memory.base' +________________ ERROR collecting tests/memory/test_weaviate.py ________________ +ImportError while importing test module '/home/v/vswarms/tests/memory/test_weaviate.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/memory/test_weaviate.py:3: in + from swarms.memory import WeaviateDB +E ImportError: cannot import name 'WeaviateDB' from 'swarms.memory' (/home/v/.local/lib/python3.10/site-packages/swarms/memory/__init__.py) +________________ ERROR collecting tests/models/test_kosmos2.py _________________ +ImportError while importing test module '/home/v/vswarms/tests/models/test_kosmos2.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/models/test_kosmos2.py:4: in + from swarms.models.kosmos2 import Kosmos2, Detections +../.local/lib/python3.10/site-packages/swarms/models/kosmos2.py:4: in + from pydantic import BaseModel, model_validator, validator +E ImportError: cannot import name 'model_validator' from 'pydantic' (/home/v/.local/lib/python3.10/site-packages/pydantic/__init__.cpython-310-x86_64-linux-gnu.so) +________________ ERROR collecting tests/models/test_multion.py _________________ +ImportError while importing test module '/home/v/vswarms/tests/models/test_multion.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +../.local/lib/python3.10/site-packages/swarms/models/multion.py:5: in + import multion +E ModuleNotFoundError: No module named 'multion' + +During handling of the above exception, another exception occurred: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/models/test_multion.py:3: in + from swarms.models.multion import MultiOn +../.local/lib/python3.10/site-packages/swarms/models/multion.py:8: in + raise ImportError( +E ImportError: Cannot import multion, please install 'pip install' +_________________ ERROR collecting tests/models/test_ssd_1b.py _________________ +ImportError while importing test module '/home/v/vswarms/tests/models/test_ssd_1b.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/models/test_ssd_1b.py:2: in + from swarms.models.ssd_1b import SSD1B +../.local/lib/python3.10/site-packages/swarms/models/ssd_1b.py:10: in + from diffusers import StableDiffusionXLPipeline +E ImportError: cannot import name 'StableDiffusionXLPipeline' from 'diffusers' (/home/v/.local/lib/python3.10/site-packages/diffusers/__init__.py) +________________ ERROR collecting tests/models/test_whisperx.py ________________ +ImportError while importing test module '/home/v/vswarms/tests/models/test_whisperx.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/models/test_whisperx.py:7: in + import whisperx +E ModuleNotFoundError: No module named 'whisperx' +______________ ERROR collecting tests/structs/test_autoscaler.py _______________ +../.local/lib/python3.10/site-packages/_pytest/runner.py:341: in from_call + result: Optional[TResult] = func() +../.local/lib/python3.10/site-packages/_pytest/runner.py:372: in + call = CallInfo.from_call(lambda: list(collector.collect()), "collect") +../.local/lib/python3.10/site-packages/_pytest/python.py:531: in collect + self._inject_setup_module_fixture() +../.local/lib/python3.10/site-packages/_pytest/python.py:545: in _inject_setup_module_fixture + self.obj, ("setUpModule", "setup_module") +../.local/lib/python3.10/site-packages/_pytest/python.py:310: in obj + self._obj = obj = self._getobj() +../.local/lib/python3.10/site-packages/_pytest/python.py:528: in _getobj + return self._importtestmodule() +../.local/lib/python3.10/site-packages/_pytest/python.py:617: in _importtestmodule + mod = import_path(self.path, mode=importmode, root=self.config.rootpath) +../.local/lib/python3.10/site-packages/_pytest/pathlib.py:567: in import_path + importlib.import_module(module_name) +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +:1050: in _gcd_import + ??? +:1027: in _find_and_load + ??? +:1006: in _find_and_load_unlocked + ??? +:688: in _load_unlocked + ??? +../.local/lib/python3.10/site-packages/_pytest/assertion/rewrite.py:178: in exec_module + exec(co, module.__dict__) +tests/structs/test_autoscaler.py:15: in + llm = OpenAIChat( +../.local/lib/python3.10/site-packages/langchain/load/serializable.py:97: in __init__ + super().__init__(**kwargs) +pydantic/main.py:341: in pydantic.main.BaseModel.__init__ + ??? +E pydantic.error_wrappers.ValidationError: 1 validation error for OpenAIChat +E __root__ +E Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter. (type=value_error) +_______________ ERROR collecting tests/swarms/test_groupchat.py ________________ +../.local/lib/python3.10/site-packages/_pytest/runner.py:341: in from_call + result: Optional[TResult] = func() +../.local/lib/python3.10/site-packages/_pytest/runner.py:372: in + call = CallInfo.from_call(lambda: list(collector.collect()), "collect") +../.local/lib/python3.10/site-packages/_pytest/python.py:531: in collect + self._inject_setup_module_fixture() +../.local/lib/python3.10/site-packages/_pytest/python.py:545: in _inject_setup_module_fixture + self.obj, ("setUpModule", "setup_module") +../.local/lib/python3.10/site-packages/_pytest/python.py:310: in obj + self._obj = obj = self._getobj() +../.local/lib/python3.10/site-packages/_pytest/python.py:528: in _getobj + return self._importtestmodule() +../.local/lib/python3.10/site-packages/_pytest/python.py:617: in _importtestmodule + mod = import_path(self.path, mode=importmode, root=self.config.rootpath) +../.local/lib/python3.10/site-packages/_pytest/pathlib.py:567: in import_path + importlib.import_module(module_name) +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +:1050: in _gcd_import + ??? +:1027: in _find_and_load + ??? +:1006: in _find_and_load_unlocked + ??? +:688: in _load_unlocked + ??? +../.local/lib/python3.10/site-packages/_pytest/assertion/rewrite.py:178: in exec_module + exec(co, module.__dict__) +tests/swarms/test_groupchat.py:9: in + llm2 = Anthropic() +../.local/lib/python3.10/site-packages/langchain/load/serializable.py:97: in __init__ + super().__init__(**kwargs) +pydantic/main.py:341: in pydantic.main.BaseModel.__init__ + ??? +E pydantic.error_wrappers.ValidationError: 1 validation error for Anthropic +E __root__ +E Did not find anthropic_api_key, please add an environment variable `ANTHROPIC_API_KEY` which contains it, or pass `anthropic_api_key` as a named parameter. (type=value_error) +____________ ERROR collecting tests/telemetry/test_posthog_utils.py ____________ +../.local/lib/python3.10/site-packages/_pytest/runner.py:341: in from_call + result: Optional[TResult] = func() +../.local/lib/python3.10/site-packages/_pytest/runner.py:372: in + call = CallInfo.from_call(lambda: list(collector.collect()), "collect") +../.local/lib/python3.10/site-packages/_pytest/python.py:531: in collect + self._inject_setup_module_fixture() +../.local/lib/python3.10/site-packages/_pytest/python.py:545: in _inject_setup_module_fixture + self.obj, ("setUpModule", "setup_module") +../.local/lib/python3.10/site-packages/_pytest/python.py:310: in obj + self._obj = obj = self._getobj() +../.local/lib/python3.10/site-packages/_pytest/python.py:528: in _getobj + return self._importtestmodule() +../.local/lib/python3.10/site-packages/_pytest/python.py:617: in _importtestmodule + mod = import_path(self.path, mode=importmode, root=self.config.rootpath) +../.local/lib/python3.10/site-packages/_pytest/pathlib.py:567: in import_path + importlib.import_module(module_name) +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +:1050: in _gcd_import + ??? +:1027: in _find_and_load + ??? +:1006: in _find_and_load_unlocked + ??? +:688: in _load_unlocked + ??? +../.local/lib/python3.10/site-packages/_pytest/assertion/rewrite.py:178: in exec_module + exec(co, module.__dict__) +tests/telemetry/test_posthog_utils.py:5: in + from swarms.telemetry.posthog_utils import ( +../.local/lib/python3.10/site-packages/swarms/telemetry/posthog_utils.py:15: in + posthog = Posthog(api_key, host=host) +../.local/lib/python3.10/site-packages/posthog/client.py:58: in __init__ + require("api_key", self.api_key, string_types) +../.local/lib/python3.10/site-packages/posthog/client.py:751: in require + raise AssertionError(msg) +E AssertionError: api_key must have (,), got: None +__________________ ERROR collecting tests/tools/test_base.py ___________________ +import file mismatch: +imported module 'test_base' has this __file__ attribute: + /home/v/vswarms/tests/structs/test_base.py +which is not the same as the test file we want to collect: + /home/v/vswarms/tests/tools/test_base.py +HINT: remove __pycache__ / .pyc files and/or use a unique basename for your test file modules +_________________ ERROR collecting tests/utils/test_device.py __________________ +ImportError while importing test module '/home/v/vswarms/tests/utils/test_device.py'. +Hint: make sure your test modules/packages have valid Python names. +Traceback: +/usr/lib/python3.10/importlib/__init__.py:126: in import_module + return _bootstrap._gcd_import(name[level:], package, level) +tests/utils/test_device.py:4: in + from swarms.utils.device_checker_cuda import check_device +E ModuleNotFoundError: No module named 'swarms.utils.device_checker_cuda' +=============================== warnings summary =============================== +../.local/lib/python3.10/site-packages/PyPDF2/__init__.py:21 + /home/v/.local/lib/python3.10/site-packages/PyPDF2/__init__.py:21: DeprecationWarning: PyPDF2 is deprecated. Please move to the pypdf library instead. + warnings.warn( + +../.local/lib/python3.10/site-packages/tensorflow/__init__.py:29 + /home/v/.local/lib/python3.10/site-packages/tensorflow/__init__.py:29: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives + import distutils as _distutils + +../.local/lib/python3.10/site-packages/tensorflow/python/framework/dtypes.py:35 + /home/v/.local/lib/python3.10/site-packages/tensorflow/python/framework/dtypes.py:35: DeprecationWarning: ml_dtypes.float8_e4m3b11 is deprecated. Use ml_dtypes.float8_e4m3b11fnuz + from tensorflow.tsl.python.lib.core import pywrap_ml_dtypes + +../.local/lib/python3.10/site-packages/timm/models/layers/__init__.py:49 + /home/v/.local/lib/python3.10/site-packages/timm/models/layers/__init__.py:49: DeprecationWarning: Importing from timm.models.layers is deprecated, please import via timm.layers + warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", DeprecationWarning) + +-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html +=========================== short test summary info ============================ +ERROR tests/memory/test_pq_db.py +ERROR tests/memory/test_weaviate.py +ERROR tests/models/test_kosmos2.py +ERROR tests/models/test_multion.py +ERROR tests/models/test_ssd_1b.py +ERROR tests/models/test_whisperx.py +ERROR tests/structs/test_autoscaler.py - pydantic.error_wrappers.ValidationEr... +ERROR tests/swarms/test_groupchat.py - pydantic.error_wrappers.ValidationErro... +ERROR tests/telemetry/test_posthog_utils.py - AssertionError: api_key must ha... +ERROR tests/tools/test_base.py +ERROR tests/utils/test_device.py +!!!!!!!!!!!!!!!!!!! Interrupted: 11 errors during collection !!!!!!!!!!!!!!!!!!! +======================== 4 warnings, 11 errors in 6.22s ======================== +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 421, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 262, in __enter__ + self.acquire() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 219, in acquire + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) +Message: 'Attempting to acquire lock %s on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_2d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 421, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 262, in __enter__ + self.acquire() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 222, in acquire + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) +Message: 'Lock %s acquired on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_2d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 421, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 278, in __exit__ + self.release() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 251, in release + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) +Message: 'Attempting to release lock %s on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_2d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 421, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 278, in __exit__ + self.release() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 254, in release + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) +Message: 'Lock %s released on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_2d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 422, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 262, in __enter__ + self.acquire() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 219, in acquire + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) +Message: 'Attempting to acquire lock %s on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_4d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 422, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 262, in __enter__ + self.acquire() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 222, in acquire + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) +Message: 'Lock %s acquired on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_4d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 422, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 278, in __exit__ + self.release() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 251, in release + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) +Message: 'Attempting to release lock %s on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_4d_kernel.pickle.lock') +--- Logging error --- +Traceback (most recent call last): + File "/usr/lib/python3.10/logging/__init__.py", line 1103, in emit + stream.write(msg + self.terminator) +ValueError: I/O operation on closed file. +Call stack: + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 444, in matmul_ext_update_autotune_table + fp16_matmul._update_autotune_table() + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 422, in _update_autotune_table + TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 150, in _update_autotune_table + cache_manager.put(autotune_table) + File "/home/v/.local/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/matmul_ext.py", line 66, in put + with FileLock(self.lock_path): + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 278, in __exit__ + self.release() + File "/home/v/.local/lib/python3.10/site-packages/filelock/_api.py", line 254, in release + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) +Message: 'Lock %s released on %s' +Arguments: (140110562667408, '/home/v/.triton/autotune/Fp16Matmul_4d_kernel.pickle.lock') diff --git a/pyproject.toml b/pyproject.toml index cf214581..6c14f422 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,6 +65,7 @@ open_clip_torch = "2.20.0" soundfile = "0.12.1" torchvision = "0.16.1" rich = "13.5.2" +peft = "*" [tool.poetry.group.lint.dependencies] diff --git a/requirements.txt b/requirements.txt index ed4c9069..2ac7e502 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,3 +69,4 @@ mkdocs mkdocs-material mkdocs-glightbox pre-commit==3.2.2 +peft diff --git a/swarms/models/kosmos_two.py b/swarms/models/kosmos_two.py index a0c5a86a..b953ddf9 100644 --- a/swarms/models/kosmos_two.py +++ b/swarms/models/kosmos_two.py @@ -8,8 +8,7 @@ import torchvision.transforms as T from PIL import Image from transformers import AutoModelForVision2Seq, AutoProcessor -from swarms.models.base_multimodal_model import BaseMultimodalModel - +from swarms.models.base_multimodal_model import BaseMultiModalModel # utils def is_overlapping(rect1, rect2): @@ -18,7 +17,7 @@ def is_overlapping(rect1, rect2): return not (x2 < x3 or x1 > x4 or y2 < y3 or y1 > y4) -class Kosmos(BaseMultimodalModel): +class Kosmos(BaseMultiModalModel): """ Kosmos model by Yen-Chun Shieh diff --git a/swarms/models/multion.py b/swarms/models/multion.py deleted file mode 100644 index 14152faf..00000000 --- a/swarms/models/multion.py +++ /dev/null @@ -1,60 +0,0 @@ -from swarms.models.base_llm import AbstractLLM - - -try: - import multion - -except ImportError: - raise ImportError( - "Cannot import multion, please install 'pip install'" - ) - - -class MultiOn(AbstractLLM): - """ - MultiOn is a wrapper for the Multion API. - - Args: - **kwargs: - - Methods: - run(self, task: str, url: str, *args, **kwargs) - - Example: - >>> from swarms.models.multion import MultiOn - >>> multion = MultiOn() - >>> multion.run("Order chicken tendies", "https://www.google.com/") - "Order chicken tendies. https://www.google.com/" - - """ - - def __init__(self, **kwargs): - super(MultiOn, self).__init__(**kwargs) - - def run(self, task: str, url: str, *args, **kwargs) -> str: - """Run the multion model - - Args: - task (str): _description_ - url (str): _description_ - - Returns: - str: _description_ - """ - response = multion.new_session({"input": task, "url": url}) - return response - - def generate_summary( - self, task: str, url: str, *args, **kwargs - ) -> str: - """Generate a summary from the multion model - - Args: - task (str): _description_ - url (str): _description_ - - Returns: - str: _description_ - """ - response = multion.new_session({"input": task, "url": url}) - return response diff --git a/swarms/utils/code_interpreter.py b/swarms/utils/code_interpreter.py index 98fbab70..b42b2463 100644 --- a/swarms/utils/code_interpreter.py +++ b/swarms/utils/code_interpreter.py @@ -5,22 +5,7 @@ import time import traceback -class BaseCodeInterpreter: - """ - .run is a generator that yields a dict with attributes: active_line, output - """ - - def __init__(self): - pass - - def run(self, code): - pass - - def terminate(self): - pass - - -class SubprocessCodeInterpreter(BaseCodeInterpreter): +class SubprocessCodeInterpreter: """ SubprocessCodeinterpreter is a base class for code interpreters that run code in a subprocess. diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/models/test_multion.py b/tests/models/test_multion.py deleted file mode 100644 index cc91b421..00000000 --- a/tests/models/test_multion.py +++ /dev/null @@ -1,54 +0,0 @@ -import pytest -from unittest.mock import Mock, patch -from swarms.models.multion import MultiOn - - -@pytest.fixture -def multion_instance(): - return MultiOn() - - -@pytest.fixture -def mock_multion(): - return Mock() - - -def test_multion_import(): - with pytest.raises(ImportError): - pass - - -def test_multion_init(): - multion = MultiOn() - assert isinstance(multion, MultiOn) - - -def test_multion_run_with_valid_input(multion_instance, mock_multion): - task = "Order chicken tendies" - url = "https://www.google.com/" - mock_multion.new_session.return_value = ( - "Order chicken tendies. https://www.google.com/" - ) - - with patch("swarms.models.multion.multion", mock_multion): - response = multion_instance.run(task, url) - - assert ( - response == "Order chicken tendies. https://www.google.com/" - ) - - -def test_multion_run_with_invalid_input( - multion_instance, mock_multion -): - task = "" - url = "https://www.google.com/" - mock_multion.new_session.return_value = None - - with patch("swarms.models.multion.multion", mock_multion): - response = multion_instance.run(task, url) - - assert response is None - - -# Add more test cases to cover different scenarios, edge cases, and error handling as needed. diff --git a/tests/models/test_ssd_1b.py b/tests/models/test_ssd_1b.py index 35cc4864..95f322ef 100644 --- a/tests/models/test_ssd_1b.py +++ b/tests/models/test_ssd_1b.py @@ -164,72 +164,3 @@ def test_ssd1b_repr_str(ssd1b_model): assert str(ssd1b_model) == f"SSD1B(image_url={image_url})" -import pytest -from your_module import SSD1B - - -# Create fixtures if needed -@pytest.fixture -def ssd1b_model(): - return SSD1B() - - -# Test cases for additional scenarios and behaviors -def test_ssd1b_dashboard_printing(ssd1b_model, capsys): - ssd1b_model.dashboard = True - ssd1b_model.print_dashboard() - captured = capsys.readouterr() - assert "SSD1B Dashboard:" in captured.out - - -def test_ssd1b_generate_image_name(ssd1b_model): - task = "A painting of a dog" - img_name = ssd1b_model._generate_image_name(task) - assert isinstance(img_name, str) - assert len(img_name) > 0 - - -def test_ssd1b_set_width_height(ssd1b_model, mocker): - img = mocker.MagicMock() - width, height = 800, 600 - result = ssd1b_model.set_width_height(img, width, height) - assert result == img.resize.return_value - - -def test_ssd1b_read_img(ssd1b_model, mocker): - img = mocker.MagicMock() - result = ssd1b_model.read_img(img) - assert result == img.open.return_value - - -def test_ssd1b_convert_to_bytesio(ssd1b_model, mocker): - img = mocker.MagicMock() - img_format = "PNG" - result = ssd1b_model.convert_to_bytesio(img, img_format) - assert isinstance(result, bytes) - - -def test_ssd1b_save_image(ssd1b_model, mocker, tmp_path): - img = mocker.MagicMock() - img_name = "test.png" - save_path = tmp_path / img_name - ssd1b_model._download_image(img, img_name, save_path) - assert save_path.exists() - - -def test_ssd1b_repr_str(ssd1b_model): - task = "A painting of a dog" - image_url = ssd1b_model(task) - assert repr(ssd1b_model) == f"SSD1B(image_url={image_url})" - assert str(ssd1b_model) == f"SSD1B(image_url={image_url})" - - -def test_ssd1b_rate_limited_call(ssd1b_model, mocker): - task = "A painting of a dog" - mocker.patch.object( - ssd1b_model, - "__call__", - side_effect=Exception("Rate limit exceeded"), - ) - with pytest.raises(Exception, match="Rate limit exceeded"): - ssd1b_model.rate_limited_call(task) diff --git a/tests/utils/test_class_args_wrapper.py b/tests/utils/test_class_args_wrapper.py index d846f786..67a8a7fa 100644 --- a/tests/utils/test_class_args_wrapper.py +++ b/tests/utils/test_class_args_wrapper.py @@ -2,11 +2,10 @@ import pytest from io import StringIO from contextlib import redirect_stdout from swarms.utils.class_args_wrapper import print_class_parameters -from swarms.structs import Agent, Autoscaler +from swarms.structs.agent import Agent +from swarms.structs.autoscaler import Autoscaler from fastapi import FastAPI from fastapi.testclient import TestClient -from swarms.utils.class_args_wrapper import print_class_parameters -from swarms.structs import Agent, Autoscaler app = FastAPI() diff --git a/tests/utils/test_subprocess_code_interpreter.py b/tests/utils/test_subprocess_code_interpreter.py index 2c7f7e47..01f45da4 100644 --- a/tests/utils/test_subprocess_code_interpreter.py +++ b/tests/utils/test_subprocess_code_interpreter.py @@ -1,307 +1,80 @@ +import pytest import subprocess import threading -import time - -import pytest - +import queue from swarms.utils.code_interpreter import ( - BaseCodeInterpreter, SubprocessCodeInterpreter, -) +) # Adjust the import according to your project structure +# Fixture for the SubprocessCodeInterpreter instance @pytest.fixture -def subprocess_code_interpreter(): - interpreter = SubprocessCodeInterpreter() - interpreter.start_cmd = "python -c" - yield interpreter - interpreter.terminate() - - -def test_base_code_interpreter_init(): - interpreter = BaseCodeInterpreter() - assert isinstance(interpreter, BaseCodeInterpreter) - - -def test_base_code_interpreter_run_not_implemented(): - interpreter = BaseCodeInterpreter() - with pytest.raises(NotImplementedError): - interpreter.run("code") - - -def test_base_code_interpreter_terminate_not_implemented(): - interpreter = BaseCodeInterpreter() - with pytest.raises(NotImplementedError): - interpreter.terminate() - - -def test_subprocess_code_interpreter_init( - subprocess_code_interpreter, -): - assert isinstance( - subprocess_code_interpreter, SubprocessCodeInterpreter - ) - - -def test_subprocess_code_interpreter_start_process( - subprocess_code_interpreter, -): - subprocess_code_interpreter.start_process() - assert subprocess_code_interpreter.process is not None - - -def test_subprocess_code_interpreter_terminate( - subprocess_code_interpreter, -): - subprocess_code_interpreter.start_process() - subprocess_code_interpreter.terminate() - assert subprocess_code_interpreter.process.poll() is not None - - -def test_subprocess_code_interpreter_run_success( - subprocess_code_interpreter, -): - code = 'print("Hello, World!")' - result = list(subprocess_code_interpreter.run(code)) - assert any( - "Hello, World!" in output.get("output", "") - for output in result - ) - - -def test_subprocess_code_interpreter_run_with_error( - subprocess_code_interpreter, -): - code = 'print("Hello, World")\nraise ValueError("Error!")' - result = list(subprocess_code_interpreter.run(code)) - assert any( - "Error!" in output.get("output", "") for output in result - ) - - -def test_subprocess_code_interpreter_run_with_keyboard_interrupt( - subprocess_code_interpreter, -): - code = ( - 'import time\ntime.sleep(2)\nprint("Hello, World")\nraise' - " KeyboardInterrupt" - ) - result = list(subprocess_code_interpreter.run(code)) - assert any( - "KeyboardInterrupt" in output.get("output", "") - for output in result - ) - - -def test_subprocess_code_interpreter_run_max_retries( - subprocess_code_interpreter, monkeypatch -): - def mock_subprocess_popen(*args, **kwargs): - raise subprocess.CalledProcessError(1, "mocked_cmd") +def interpreter(): + return SubprocessCodeInterpreter() - monkeypatch.setattr(subprocess, "Popen", mock_subprocess_popen) - code = 'print("Hello, World!")' - result = list(subprocess_code_interpreter.run(code)) - assert any( - "Maximum retries reached. Could not execute code." - in output.get("output", "") - for output in result - ) +# Test for correct initialization +def test_initialization(interpreter): + assert interpreter.start_cmd == "" + assert interpreter.process is None + assert not interpreter.debug_mode + assert isinstance(interpreter.output_queue, queue.Queue) + assert isinstance(interpreter.done, threading.Event) -def test_subprocess_code_interpreter_run_retry_on_error( - subprocess_code_interpreter, monkeypatch -): - def mock_subprocess_popen(*args, **kwargs): - nonlocal popen_count - if popen_count == 0: - popen_count += 1 - raise subprocess.CalledProcessError(1, "mocked_cmd") - else: - return subprocess.Popen( - "echo 'Hello, World!'", - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - - monkeypatch.setattr(subprocess, "Popen", mock_subprocess_popen) - popen_count = 0 - - code = 'print("Hello, World!")' - result = list(subprocess_code_interpreter.run(code)) - assert any( - "Hello, World!" in output.get("output", "") - for output in result - ) - +# Test for starting and terminating process +def test_start_and_terminate_process(interpreter): + interpreter.start_cmd = "echo Hello" + interpreter.start_process() + assert isinstance(interpreter.process, subprocess.Popen) + interpreter.terminate() + assert ( + interpreter.process.poll() is not None + ) # Process should be terminated -# Add more tests to cover other aspects of the code and edge cases as needed -# Import statements and fixtures from the previous code block +# Test preprocess_code method +def test_preprocess_code(interpreter): + code = "print('Hello, World!')" + processed_code = interpreter.preprocess_code(code) + # Add assertions based on expected behavior of preprocess_code + assert processed_code == code # Example assertion -def test_subprocess_code_interpreter_line_postprocessor( - subprocess_code_interpreter, -): - line = "This is a test line" - processed_line = subprocess_code_interpreter.line_postprocessor( - line - ) +# Test detect_active_line method +def test_detect_active_line(interpreter): + line = "Some line of code" assert ( - processed_line == line - ) # No processing, should remain the same + interpreter.detect_active_line(line) is None + ) # Adjust assertion based on implementation -def test_subprocess_code_interpreter_preprocess_code( - subprocess_code_interpreter, -): - code = 'print("Hello, World!")' - preprocessed_code = subprocess_code_interpreter.preprocess_code( - code - ) +# Test detect_end_of_execution method +def test_detect_end_of_execution(interpreter): + line = "End of execution line" assert ( - preprocessed_code == code - ) # No preprocessing, should remain the same - - -def test_subprocess_code_interpreter_detect_active_line( - subprocess_code_interpreter, -): - line = "Active line: 5" - active_line = subprocess_code_interpreter.detect_active_line(line) - assert active_line == 5 - - -def test_subprocess_code_interpreter_detect_end_of_execution( - subprocess_code_interpreter, -): - line = "Execution completed." - end_of_execution = ( - subprocess_code_interpreter.detect_end_of_execution(line) - ) - assert end_of_execution is True - - -def test_subprocess_code_interpreter_run_debug_mode( - subprocess_code_interpreter, capsys -): - subprocess_code_interpreter.debug_mode = True - code = 'print("Hello, World!")' - list(subprocess_code_interpreter.run(code)) - captured = capsys.readouterr() - assert "Running code:\n" in captured.out - assert "Received output line:\n" in captured.out - - -def test_subprocess_code_interpreter_run_no_debug_mode( - subprocess_code_interpreter, capsys -): - subprocess_code_interpreter.debug_mode = False - code = 'print("Hello, World!")' - list(subprocess_code_interpreter.run(code)) - captured = capsys.readouterr() - assert "Running code:\n" not in captured.out - assert "Received output line:\n" not in captured.out + interpreter.detect_end_of_execution(line) is None + ) # Adjust assertion based on implementation -def test_subprocess_code_interpreter_run_empty_output_queue( - subprocess_code_interpreter, -): - code = 'print("Hello, World!")' - result = list(subprocess_code_interpreter.run(code)) - assert not any("active_line" in output for output in result) - - -def test_subprocess_code_interpreter_handle_stream_output_stdout( - subprocess_code_interpreter, -): - line = "This is a test line" - subprocess_code_interpreter.handle_stream_output( - threading.current_thread(), False - ) - subprocess_code_interpreter.process.stdout.write(line + "\n") - subprocess_code_interpreter.process.stdout.flush() - time.sleep(0.1) - output = subprocess_code_interpreter.output_queue.get() - assert output["output"] == line - - -def test_subprocess_code_interpreter_handle_stream_output_stderr( - subprocess_code_interpreter, -): - line = "This is an error line" - subprocess_code_interpreter.handle_stream_output( - threading.current_thread(), True - ) - subprocess_code_interpreter.process.stderr.write(line + "\n") - subprocess_code_interpreter.process.stderr.flush() - time.sleep(0.1) - output = subprocess_code_interpreter.output_queue.get() - assert output["output"] == line - - -def test_subprocess_code_interpreter_run_with_preprocess_code( - subprocess_code_interpreter, capsys -): - code = 'print("Hello, World!")' - subprocess_code_interpreter.preprocess_code = ( - lambda x: x.upper() - ) # Modify code in preprocess_code - result = list(subprocess_code_interpreter.run(code)) - assert any( - "Hello, World!" in output.get("output", "") - for output in result - ) - - -def test_subprocess_code_interpreter_run_with_exception( - subprocess_code_interpreter, capsys -): - code = 'print("Hello, World!")' - subprocess_code_interpreter.start_cmd = ( # Force an exception during subprocess creation - "nonexistent_command" - ) - result = list(subprocess_code_interpreter.run(code)) - assert any( - "Maximum retries reached" in output.get("output", "") - for output in result - ) - - -def test_subprocess_code_interpreter_run_with_active_line( - subprocess_code_interpreter, capsys -): - code = "a = 5\nprint(a)" # Contains an active line - result = list(subprocess_code_interpreter.run(code)) - assert any(output.get("active_line") == 5 for output in result) - +# Test line_postprocessor method +def test_line_postprocessor(interpreter): + line = "Some output line" + assert ( + interpreter.line_postprocessor(line) == line + ) # Adjust assertion based on implementation -def test_subprocess_code_interpreter_run_with_end_of_execution( - subprocess_code_interpreter, capsys -): - code = ( # Simple code without active line marker - 'print("Hello, World!")' - ) - result = list(subprocess_code_interpreter.run(code)) - assert any(output.get("active_line") is None for output in result) +# Test handle_stream_output method +def test_handle_stream_output(interpreter, monkeypatch): + # This requires more complex setup, including monkeypatching and simulating stream output + # Example setup + def mock_readline(): + yield "output line" + yield "" -def test_subprocess_code_interpreter_run_with_multiple_lines( - subprocess_code_interpreter, capsys -): - code = "a = 5\nb = 10\nprint(a + b)" - result = list(subprocess_code_interpreter.run(code)) - assert any("15" in output.get("output", "") for output in result) + monkeypatch.setattr("sys.stdout", mock_readline()) + # More test code needed here to simulate and assert the behavior of handle_stream_output -def test_subprocess_code_interpreter_run_with_unicode_characters( - subprocess_code_interpreter, capsys -): - code = 'print("こんにちは、世界")' # Contains unicode characters - result = list(subprocess_code_interpreter.run(code)) - assert any( - "こんにちは、世界" in output.get("output", "") - for output in result - ) +# More tests needed for run method, error handling, and edge cases.