From dabfcffc566fa1fffab14c285e5bed9a19953ba7 Mon Sep 17 00:00:00 2001
From: Kye <kye@apacmediasolutions.com>
Date: Mon, 4 Dec 2023 22:59:02 -0800
Subject: [PATCH] [TESTS] Replace stale gpt2-small model IDs and prune obsolete HuggingfaceLLM tests

---
 docs/swarms/models/huggingface.md |  4 ++--
 swarms/models/yarn_mistral.py     |  2 +-
 tests/models/test_huggingface.py  | 22 +++++-----------------
 3 files changed, 8 insertions(+), 20 deletions(-)

diff --git a/docs/swarms/models/huggingface.md b/docs/swarms/models/huggingface.md
index e429f080..8606d8f2 100644
--- a/docs/swarms/models/huggingface.md
+++ b/docs/swarms/models/huggingface.md
@@ -96,7 +96,7 @@ Here are three ways to use the `HuggingfaceLLM` class:
 from swarms.models import HuggingfaceLLM
 
 # Initialize the HuggingfaceLLM instance with a model ID
-model_id = "gpt2-small"
+model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
 inference = HuggingfaceLLM(model_id=model_id)
 
 # Generate text based on a prompt
@@ -116,7 +116,7 @@ custom_config = {
     "quantization_config": {"load_in_4bit": True},
     "verbose": True
 }
-inference = HuggingfaceLLM(model_id="gpt2-small", **custom_config)
+inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config)
 
 # Generate text based on a prompt
 prompt_text = "Tell me a joke"
diff --git a/swarms/models/yarn_mistral.py b/swarms/models/yarn_mistral.py
index 7b5a9c02..ff65b856 100644
--- a/swarms/models/yarn_mistral.py
+++ b/swarms/models/yarn_mistral.py
@@ -26,7 +26,7 @@ class YarnMistral128:
     ```
     from finetuning_suite import Inference
 
-    model_id = "gpt2-small"
+    model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
     inference = Inference(model_id=model_id)
 
     prompt_text = "Once upon a time"
diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py
index 8d53b8e0..326a66cf 100644
--- a/tests/models/test_huggingface.py
+++ b/tests/models/test_huggingface.py
@@ -11,14 +11,14 @@ from swarms.models.huggingface import (
 # Fixture for the class instance
 @pytest.fixture
 def llm_instance():
-    model_id = "gpt2-small"
+    model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
     instance = HuggingfaceLLM(model_id=model_id)
     return instance
 
 
 # Test for instantiation and attributes
 def test_llm_initialization(llm_instance):
-    assert llm_instance.model_id == "gpt2-small"
+    assert llm_instance.model_id == "NousResearch/Nous-Hermes-2-Vision-Alpha"
     assert llm_instance.max_length == 500
     # ... add more assertions for all default attributes
 
@@ -75,9 +75,9 @@ def test_llm_memory_consumption(llm_instance):
 @pytest.mark.parametrize(
     "model_id, max_length",
     [
-        ("gpt2-small", 100),
-        ("gpt2-medium", 200),
-        ("gpt2-large", None),  # None to check default behavior
+        ("NousResearch/Nous-Hermes-2-Vision-Alpha", 100),
+        ("microsoft/Orca-2-13b", 200),
+        ("berkeley-nest/Starling-LM-7B-alpha", None),  # None to check default behavior
     ],
 )
 def test_llm_initialization_params(model_id, max_length):
@@ -99,12 +99,6 @@ def test_llm_set_invalid_device(llm_instance):
         llm_instance.set_device("quantum_processor")
 
 
-# Test for model download progress bar
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_model_download_progress(mock_download, llm_instance):
-    llm_instance.download_model_with_progress()
-    mock_download.assert_called_once()
-
 
 # Mocking external API call to test run method without network
 @patch("swarms.models.huggingface.HuggingfaceLLM.run")
@@ -209,7 +203,6 @@ def test_llm_force_gpu_when_unavailable(
 
 # Test for proper cleanup after model use (releasing resources)
 @patch("swarms.models.huggingface.HuggingfaceLLM._model")
-@patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer")
-def test_llm_cleanup(mock_model, mock_tokenizer, llm_instance):
+def test_llm_cleanup(mock_model, llm_instance):
     llm_instance.cleanup()
     # Assuming cleanup method is meant to free resources
@@ -217,11 +210,5 @@ def test_llm_cleanup(mock_model, mock_tokenizer, llm_instance):
-    mock_tokenizer.delete.assert_called_once()
 
 
-# Test updating the configuration after instantiation
-def test_llm_update_configuration(llm_instance):
-    new_config = {"temperature": 0.7}
-    llm_instance.update_configuration(new_config)
-    assert llm_instance.configuration["temperature"] == 0.7
 
 
 # Test if the model is re-downloaded when changing the model_id