diff --git a/playground/models/together.py b/playground/models/together.py
index 890bcd6d..f730f72f 100644
--- a/playground/models/together.py
+++ b/playground/models/together.py
@@ -1,7 +1,7 @@
-from swarms import TogetherModel
+from swarms import TogetherLLM

 # Initialize the model with your parameters
-model = TogetherModel(
+model = TogetherLLM(
     model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
     max_tokens=1000,
 )
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index 6a884648..d11cf00a 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -22,7 +22,7 @@ from swarms.models.modelscope_pipeline import ModelScopePipeline
 from swarms.models.modelscope_llm import (
     ModelScopeAutoModel,
 )  # noqa: E402
-from swarms.models.together import TogetherModel  # noqa: E402
+from swarms.models.together import TogetherLLM  # noqa: E402

 ################# MultiModal Models
 from swarms.models.base_multimodal_model import (
@@ -94,5 +94,5 @@ __all__ = [
     "CogAgent",
     "ModelScopePipeline",
     "ModelScopeAutoModel",
-    "TogetherModel",
+    "TogetherLLM",
 ]
diff --git a/swarms/models/together.py b/swarms/models/together.py
index 88949a5c..e8f8968c 100644
--- a/swarms/models/together.py
+++ b/swarms/models/together.py
@@ -16,7 +16,7 @@ def together_api_key_env():
     return os.getenv("TOGETHER_API_KEY")


-class TogetherModel(AbstractLLM):
+class TogetherLLM(AbstractLLM):
     """
     GPT-4 Vision API

@@ -65,7 +65,7 @@ class TogetherModel(AbstractLLM):
         *args,
         **kwargs,
     ):
-        super(TogetherModel).__init__(*args, **kwargs)
+        super(TogetherLLM).__init__(*args, **kwargs)
         self.together_api_key = together_api_key
         self.logging_enabled = logging_enabled
         self.model_name = model_name
diff --git a/tests/test___init__.py b/tests/__init__.py
similarity index 100%
rename from tests/test___init__.py
rename to tests/__init__.py
diff --git a/tests/models/test_togther.py b/tests/models/test_togther.py
index c28e69ae..43a99b00 100644
--- a/tests/models/test_togther.py
+++ b/tests/models/test_togther.py
@@ -1,7 +1,7 @@
 import requests
 import pytest
 from unittest.mock import patch, Mock
-from swarms.models.together import TogetherModel
+from swarms.models.together import TogetherLLM
 import logging


@@ -11,7 +11,7 @@ def mock_api_key(monkeypatch):


 def test_init_defaults():
-    model = TogetherModel()
+    model = TogetherLLM()
     assert model.together_api_key == "mocked-api-key"
     assert model.logging_enabled is False
     assert model.model_name == "mistralai/Mixtral-8x7B-Instruct-v0.1"
@@ -25,7 +25,7 @@


 def test_init_custom_params(mock_api_key):
-    model = TogetherModel(
+    model = TogetherLLM(
         together_api_key="custom-api-key",
         logging_enabled=True,
         model_name="custom-model",
@@ -57,7 +57,7 @@
     }
     mock_post.return_value = mock_response

-    model = TogetherModel()
+    model = TogetherLLM()
     task = "What is the color of the object?"
     response = model.run(task)

@@ -70,7 +70,7 @@
         "Request failed"
     )

-    model = TogetherModel()
+    model = TogetherLLM()
     task = "What is the color of the object?"
     response = model.run(task)

@@ -78,7 +78,7 @@


 def test_run_with_logging_enabled(caplog, mock_api_key):
-    model = TogetherModel(logging_enabled=True)
+    model = TogetherLLM(logging_enabled=True)
     task = "What is the color of the object?"

     with caplog.at_level(logging.DEBUG):
@@ -91,7 +91,7 @@
     "invalid_input", [None, 123, ["list", "of", "items"]]
 )
 def test_invalid_task_input(invalid_input, mock_api_key):
-    model = TogetherModel()
+    model = TogetherLLM()
     response = model.run(invalid_input)

     assert response is None
@@ -105,7 +105,7 @@ def test_run_streaming_enabled(mock_post, mock_api_key):
     }
     mock_post.return_value = mock_response

-    model = TogetherModel(streaming_enabled=True)
+    model = TogetherLLM(streaming_enabled=True)
     task = "What is the color of the object?"
     response = model.run(task)

@@ -118,7 +118,7 @@ def test_run_empty_choices(mock_post, mock_api_key):
     mock_response.json.return_value = {"choices": []}
     mock_post.return_value = mock_response

-    model = TogetherModel()
+    model = TogetherLLM()
     task = "What is the color of the object?"
     response = model.run(task)

@@ -129,7 +129,7 @@ def test_run_with_exception(mock_post, mock_api_key):
     mock_post.side_effect = Exception("Test exception")

-    model = TogetherModel()
+    model = TogetherLLM()
     task = "What is the color of the object?"
     response = model.run(task)

@@ -138,6 +138,6 @@ def test_init_logging_disabled(monkeypatch):
     monkeypatch.setenv("TOGETHER_API_KEY", "mocked-api-key")

-    model = TogetherModel()
+    model = TogetherLLM()
     assert model.logging_enabled is False
     assert not model.system_prompt
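After applying the rename, the playground snippet gives a quick smoke test. The sketch below is a minimal usage example, not part of the diff; it assumes a valid TOGETHER_API_KEY is exported in the environment and that the root swarms package re-exports TogetherLLM, as the updated swarms/models/__init__.py indicates.

# Usage sketch for the renamed class (assumes TOGETHER_API_KEY is set and
# that `swarms` re-exports TogetherLLM, per this diff).
from swarms import TogetherLLM

# Initialize the model with the same parameters as the playground example.
model = TogetherLLM(
    model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
    max_tokens=1000,
)

# run() sends the task to the Together API and returns the generated text;
# per the tests above, it returns None if the request fails.
response = model.run("What is the color of the object?")
print(response)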