parent a552a98373
commit 7e50f19f3f
@@ -1,71 +0,0 @@
import pytest
from swarms.models.cog_agent import CogAgent
from unittest.mock import MagicMock
from PIL import Image


@pytest.fixture
def cogagent_params():
    return {
        "model_name": "ZhipuAI/cogagent-chat",
        "tokenizer_name": "I-ModelScope/vicuna-7b-v1.5",
        "dtype": "torch.bfloat16",
        "low_cpu_mem_usage": True,
        "load_in_4bit": True,
        "trust_remote_code": True,
        "device": "cuda",
    }


@pytest.fixture
def cogagent(cogagent_params):
    return CogAgent(**cogagent_params)


def test_init(mocker, cogagent_params, cogagent):
    mock_model = mocker.patch(
        "swarms.models.cog_agent.AutoModelForCausalLM.from_pretrained"
    )
    mock_tokenizer = mocker.patch(
        "swarms.models.cog_agent.AutoTokenizer.from_pretrained"
    )

    for param, value in cogagent_params.items():
        assert getattr(cogagent, param) == value

    mock_tokenizer.assert_called_once_with(
        cogagent_params["tokenizer_name"]
    )
    mock_model.assert_called_once_with(
        cogagent_params["model_name"],
        torch_dtype=cogagent_params["dtype"],
        low_cpu_mem_usage=cogagent_params["low_cpu_mem_usage"],
        load_in_4bit=cogagent_params["load_in_4bit"],
        trust_remote_code=cogagent_params["trust_remote_code"],
    )


def test_run(mocker, cogagent):
    task = "How are you?"
    img = "images/1.jpg"
    mock_image = mocker.patch(
        "PIL.Image.open", return_value=MagicMock(spec=Image.Image)
    )
    cogagent.model.build_conversation_input_ids = MagicMock(
        return_value={
            "input_ids": MagicMock(),
            "token_type_ids": MagicMock(),
            "attention_mask": MagicMock(),
            "images": [MagicMock()],
        }
    )
    cogagent.model.__call__ = MagicMock(return_value="Mocked output")
    cogagent.decode = MagicMock(return_value="Mocked response")

    output = cogagent.run(task, img)

    assert output is not None
    mock_image.assert_called_once_with(img)
    cogagent.model.build_conversation_input_ids.assert_called_once()
    cogagent.model.__call__.assert_called_once()
    cogagent.decode.assert_called_once()
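The `mocker` fixture used throughout these removed tests comes from the pytest-mock plugin. Note that `test_init` above patches the `from_pretrained` helpers only after the `cogagent` fixture has already built the object; a minimal sketch using only the standard library's `unittest.mock`, patching the same targets before construction, might look like the following. The patch targets are copied from the test above, but the sketch itself is an assumption about `swarms.models.cog_agent` and was not part of the removed file.

# Sketch only: assumes CogAgent.__init__ calls the patched from_pretrained
# helpers, as the assertions in test_init above imply.
from unittest.mock import patch

from swarms.models.cog_agent import CogAgent


def test_init_with_stdlib_patching(cogagent_params):
    with patch(
        "swarms.models.cog_agent.AutoModelForCausalLM.from_pretrained"
    ) as mock_model, patch(
        "swarms.models.cog_agent.AutoTokenizer.from_pretrained"
    ) as mock_tokenizer:
        agent = CogAgent(**cogagent_params)  # construct while patches are active
        mock_model.assert_called_once()
        mock_tokenizer.assert_called_once()
        assert agent is not None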
@@ -1,39 +0,0 @@
|
||||
import pytest
|
||||
from swarms.models.modelscope_pipeline import ModelScopePipeline
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def pipeline_params():
|
||||
return {
|
||||
"type_task": "text-generation",
|
||||
"model_name": "gpt2",
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def pipeline_model(pipeline_params):
|
||||
return ModelScopePipeline(**pipeline_params)
|
||||
|
||||
|
||||
def test_init(mocker, pipeline_params, pipeline_model):
|
||||
mock_pipeline = mocker.patch(
|
||||
"swarms.models.modelscope_pipeline.pipeline"
|
||||
)
|
||||
|
||||
for param, value in pipeline_params.items():
|
||||
assert getattr(pipeline_model, param) == value
|
||||
|
||||
mock_pipeline.assert_called_once_with(
|
||||
pipeline_params["type_task"],
|
||||
model=pipeline_params["model_name"],
|
||||
)
|
||||
|
||||
|
||||
def test_run(mocker, pipeline_model):
|
||||
task = "Generate a 10,000 word blog on health and wellness."
|
||||
pipeline_model.model = MagicMock(return_value="Mocked output")
|
||||
|
||||
output = pipeline_model.run(task)
|
||||
|
||||
assert output is not None
|
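The patch target "swarms.models.modelscope_pipeline.pipeline" above suggests the wrapper delegates to ModelScope's pipeline factory. A rough sketch of that underlying call, reusing the fixture's values, is shown below; it is an assumption about the wrapper's internals, not code taken from this diff.

# Sketch only: presumed underlying ModelScope call, inferred from the patch
# target and fixture values above.
from modelscope.pipelines import pipeline

text_generator = pipeline("text-generation", model="gpt2")
result = text_generator("Generate a short note on health and wellness.")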
@@ -1,58 +0,0 @@
import pytest
from swarms.models.modelscope_llm import ModelScopeAutoModel
from unittest.mock import MagicMock


@pytest.fixture
def model_params():
    return {
        "model_name": "gpt2",
        "tokenizer_name": None,
        "device": "cuda",
        "device_map": "auto",
        "max_new_tokens": 500,
        "skip_special_tokens": True,
    }


@pytest.fixture
def modelscope(model_params):
    return ModelScopeAutoModel(**model_params)


def test_init(mocker, model_params, modelscope):
    mock_model = mocker.patch(
        "swarms.models.modelscope_llm.AutoModelForCausalLM.from_pretrained"
    )
    mock_tokenizer = mocker.patch(
        "swarms.models.modelscope_llm.AutoTokenizer.from_pretrained"
    )

    for param, value in model_params.items():
        assert getattr(modelscope, param) == value

    mock_tokenizer.assert_called_once_with(
        model_params["tokenizer_name"]
    )
    mock_model.assert_called_once_with(
        model_params["model_name"],
        device_map=model_params["device_map"],
    )


def test_run(mocker, modelscope):
    task = "Generate a 10,000 word blog on health and wellness."
    mocker.patch(
        "swarms.models.modelscope_llm.AutoTokenizer.decode",
        return_value="Mocked output",
    )
    modelscope.model.generate = MagicMock(
        return_value=["Mocked token"]
    )
    modelscope.tokenizer = MagicMock(
        return_value={"input_ids": "Mocked input_ids"}
    )

    output = modelscope.run(task)

    assert output is not None
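The mocks in test_run above imply a conventional tokenize, generate, decode flow inside ModelScopeAutoModel.run. A minimal sketch of that flow, assuming Hugging Face-style AutoModelForCausalLM and AutoTokenizer objects and using only attributes asserted by the fixture above, follows; it is an inference from the patch targets, not code from the removed file.

# Sketch only: assumed run() flow, inferred from the mocked calls above.
inputs = modelscope.tokenizer(task, return_tensors="pt").to(modelscope.device)
token_ids = modelscope.model.generate(
    **inputs, max_new_tokens=modelscope.max_new_tokens
)
text = modelscope.tokenizer.decode(
    token_ids[0], skip_special_tokens=modelscope.skip_special_tokens
)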
@@ -1,141 +0,0 @@
import pytest
from swarms.models.vllm import vLLM


# Fixture for initializing vLLM
@pytest.fixture
def vllm_instance():
    return vLLM()


# Test the default initialization of vLLM
def test_vllm_default_init(vllm_instance):
    assert isinstance(vllm_instance, vLLM)
    assert vllm_instance.model_name == "facebook/opt-13b"
    assert vllm_instance.tensor_parallel_size == 4
    assert not vllm_instance.trust_remote_code
    assert vllm_instance.revision is None
    assert vllm_instance.temperature == 0.5
    assert vllm_instance.top_p == 0.95


# Test custom initialization of vLLM
def test_vllm_custom_init():
    vllm_instance = vLLM(
        model_name="custom_model",
        tensor_parallel_size=8,
        trust_remote_code=True,
        revision="123",
        temperature=0.7,
        top_p=0.9,
    )
    assert isinstance(vllm_instance, vLLM)
    assert vllm_instance.model_name == "custom_model"
    assert vllm_instance.tensor_parallel_size == 8
    assert vllm_instance.trust_remote_code
    assert vllm_instance.revision == "123"
    assert vllm_instance.temperature == 0.7
    assert vllm_instance.top_p == 0.9


# Test the run method of vLLM
def test_vllm_run(vllm_instance):
    task = "Hello, vLLM!"
    result = vllm_instance.run(task)
    assert isinstance(result, str)
    assert len(result) > 0


# Test run method with different temperature and top_p values
@pytest.mark.parametrize(
    "temperature, top_p", [(0.2, 0.8), (0.8, 0.2)]
)
def test_vllm_run_with_params(vllm_instance, temperature, top_p):
    task = "Temperature and Top-P Test"
    result = vllm_instance.run(
        task, temperature=temperature, top_p=top_p
    )
    assert isinstance(result, str)
    assert len(result) > 0


# Test run method with a specific model revision
def test_vllm_run_with_revision(vllm_instance):
    task = "Specific Model Revision Test"
    result = vllm_instance.run(task, revision="abc123")
    assert isinstance(result, str)
    assert len(result) > 0


# Test run method with a specific model name
def test_vllm_run_with_custom_model(vllm_instance):
    task = "Custom Model Test"
    custom_model_name = "my_custom_model"
    result = vllm_instance.run(task, model_name=custom_model_name)
    assert isinstance(result, str)
    assert len(result) > 0
    assert vllm_instance.model_name == custom_model_name


# Test run method with invalid task input
def test_vllm_run_invalid_task(vllm_instance):
    invalid_task = None
    with pytest.raises(ValueError):
        vllm_instance.run(invalid_task)


# Test run method with a very high temperature value
def test_vllm_run_high_temperature(vllm_instance):
    task = "High Temperature Test"
    high_temperature = 10.0
    result = vllm_instance.run(task, temperature=high_temperature)
    assert isinstance(result, str)
    assert len(result) > 0


# Test run method with a very low top_p value
def test_vllm_run_low_top_p(vllm_instance):
    task = "Low Top-P Test"
    low_top_p = 0.01
    result = vllm_instance.run(task, top_p=low_top_p)
    assert isinstance(result, str)
    assert len(result) > 0


# Test run method with an empty task
def test_vllm_run_empty_task(vllm_instance):
    empty_task = ""
    result = vllm_instance.run(empty_task)
    assert isinstance(result, str)
    assert len(result) == 0


# Test initialization with invalid parameters
def test_vllm_invalid_init():
    with pytest.raises(ValueError):
        vLLM(
            model_name=None,
            tensor_parallel_size=-1,
            trust_remote_code="invalid",
            revision=123,
            temperature=-0.1,
            top_p=1.1,
        )


# Test running vLLM with a large number of parallel heads
def test_vllm_large_parallel_heads():
    vllm_instance = vLLM(tensor_parallel_size=16)
    task = "Large Parallel Heads Test"
    result = vllm_instance.run(task)
    assert isinstance(result, str)
    assert len(result) > 0


# Test running vLLM with trust_remote_code set to True
def test_vllm_trust_remote_code():
    vllm_instance = vLLM(trust_remote_code=True)
    task = "Trust Remote Code Test"
    result = vllm_instance.run(task)
    assert isinstance(result, str)
    assert len(result) > 0
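These tests exercise a thin wrapper whose defaults (facebook/opt-13b, tensor_parallel_size=4, temperature=0.5, top_p=0.95) are asserted above. A wrapper like this typically delegates to the upstream vllm package roughly as in the sketch below; this is an assumption about the wrapper's internals using the fixture defaults, not code from this diff.

# Sketch only: presumed upstream vllm usage behind vLLM.run().
from vllm import LLM, SamplingParams

llm = LLM(
    model="facebook/opt-13b",  # default model_name asserted above
    tensor_parallel_size=4,
    trust_remote_code=False,
)
sampling = SamplingParams(temperature=0.5, top_p=0.95)
outputs = llm.generate(["Hello, vLLM!"], sampling)
print(outputs[0].outputs[0].text)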