[TESTS] [GraphWorkflow] [CogAgent] [ModelScopePipeline] [ModelScopeAutoModel]

pull/346/head
Kye 1 year ago
parent 24811bb9a2
commit f6458c6abd

@@ -875,6 +875,31 @@ print(video_path)
```
### ModelScope
```python
from swarms.models import ModelScopeAutoModel
# Initialize the model
mp = ModelScopeAutoModel(
    model_name="AI-ModelScope/Mixtral-8x7B-Instruct-v0.1",
)
mp.run("Generate a 10,000 word blog on health and wellness.")
```
```python
from swarms import CogAgent
# Initialize CogAgent
cog_agent = CogAgent()
# Run the model on an image
cog_agent.run("Describe this scene", "images/1.jpg")
```
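This commit also adds a `ModelScopePipeline` wrapper (tested further down in this diff), though the README hunk does not show it. A minimal usage sketch, assuming a top-level export and the `type_task`/`model_name` parameters from the test fixture:
```python
from swarms.models import ModelScopePipeline  # top-level export is an assumption

# Initialize the pipeline (parameters mirror the test fixture in this commit)
pipe = ModelScopePipeline(
    type_task="text-generation",
    model_name="gpt2",
)

pipe.run("Generate a 10,000 word blog on health and wellness.")
```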
----
## Supported Models :heavy_check_mark:

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "3.4.7"
version = "3.4.8"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@@ -29,6 +29,8 @@ from swarms.structs.utils import (
)
from swarms.structs.task import Task
from swarms.structs.block_wrapper import block
from swarms.structs.graph_workflow import GraphWorkflow
__all__ = [
"Agent",
@@ -59,4 +61,5 @@ __all__ = [
"detect_markdown",
"Task",
"block",
"GraphWorkflow",
]

@@ -17,6 +17,13 @@ class GraphWorkflow(BaseStructure):
        add_edge(from_node, to_node): Adds an edge between two nodes in the graph.
        add_conditional_edges(from_node, condition, edge_dict): Adds conditional edges from a node to multiple nodes based on a condition.
        run(): Runs the workflow and returns the graph.

    Examples:
    >>> from swarms.structs import GraphWorkflow
    >>> graph = GraphWorkflow()
    >>> graph.add("start", "Start")
    >>> graph.add("end", "End")
    >>> graph.set_entry_point("start")
    """

    def __init__(self):

@@ -0,0 +1,48 @@
from typing import Optional, List
import multiprocessing as mp

from swarms.structs.base import BaseStructure


class LoadBalancer(BaseStructure):
    def __init__(
        self,
        num_workers: int = 1,
        agents: Optional[List] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.num_workers = num_workers
        self.agents = agents
        self.tasks = []
        self.results = []
        self.workers = []
        self._init_workers()

    def _init_workers(self):
        # Spawn one worker process per requested slot
        for i in range(self.num_workers):
            worker = mp.Process(target=self._worker)
            worker.start()
            self.workers.append(worker)

    def _worker(self):
        # Pull tasks until the queue is empty, storing each result
        while True:
            task = self._get_task()
            if task is None:
                break
            result = self._run_task(task)
            self._add_result(result)

    def _get_task(self):
        if len(self.tasks) == 0:
            return None
        return self.tasks.pop(0)

    def _run_task(self, task):
        # Tasks are plain callables
        return task()

    def _add_result(self, result):
        self.results.append(result)

    def add_task(self, task):
        self.tasks.append(task)

@@ -0,0 +1,71 @@
import pytest
from swarms.models.cog_agent import CogAgent
from unittest.mock import MagicMock
from PIL import Image


@pytest.fixture
def cogagent_params():
    return {
        "model_name": "ZhipuAI/cogagent-chat",
        "tokenizer_name": "I-ModelScope/vicuna-7b-v1.5",
        "dtype": "torch.bfloat16",
        "low_cpu_mem_usage": True,
        "load_in_4bit": True,
        "trust_remote_code": True,
        "device": "cuda",
    }


@pytest.fixture
def cogagent(cogagent_params):
    return CogAgent(**cogagent_params)


def test_init(mocker, cogagent_params, cogagent):
    mock_model = mocker.patch(
        "swarms.models.cog_agent.AutoModelForCausalLM.from_pretrained"
    )
    mock_tokenizer = mocker.patch(
        "swarms.models.cog_agent.AutoTokenizer.from_pretrained"
    )

    for param, value in cogagent_params.items():
        assert getattr(cogagent, param) == value

    mock_tokenizer.assert_called_once_with(
        cogagent_params["tokenizer_name"]
    )
    mock_model.assert_called_once_with(
        cogagent_params["model_name"],
        torch_dtype=cogagent_params["dtype"],
        low_cpu_mem_usage=cogagent_params["low_cpu_mem_usage"],
        load_in_4bit=cogagent_params["load_in_4bit"],
        trust_remote_code=cogagent_params["trust_remote_code"],
    )


def test_run(mocker, cogagent):
    task = "How are you?"
    img = "images/1.jpg"
    mock_image = mocker.patch(
        "PIL.Image.open", return_value=MagicMock(spec=Image.Image)
    )
    cogagent.model.build_conversation_input_ids = MagicMock(
        return_value={
            "input_ids": MagicMock(),
            "token_type_ids": MagicMock(),
            "attention_mask": MagicMock(),
            "images": [MagicMock()],
        }
    )
    cogagent.model.__call__ = MagicMock(return_value="Mocked output")
    cogagent.decode = MagicMock(return_value="Mocked response")

    output = cogagent.run(task, img)

    assert output is not None
    mock_image.assert_called_once_with(img)
    cogagent.model.build_conversation_input_ids.assert_called_once()
    cogagent.model.__call__.assert_called_once()
    cogagent.decode.assert_called_once()
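A hedged reading of `test_run` above: the mocks imply the call sequence sketched below inside `CogAgent.run` (the implementation itself is not in this diff, and the keyword arguments to `build_conversation_input_ids` are assumptions):
```python
from PIL import Image


def run_sketch(agent, task: str, img_path: str):
    # Mirrors only what the mocks in test_run verify; not the actual implementation.
    image = Image.open(img_path)  # patched via "PIL.Image.open" above
    inputs = agent.model.build_conversation_input_ids(
        agent.tokenizer, query=task, images=[image]  # argument names are assumptions
    )
    outputs = agent.model(**inputs)  # mocked model __call__
    return agent.decode(outputs)  # mocked decode step
```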

@@ -0,0 +1,39 @@
import pytest
from swarms.models.modelscope_pipeline import ModelScopePipeline
from unittest.mock import MagicMock


@pytest.fixture
def pipeline_params():
    return {
        "type_task": "text-generation",
        "model_name": "gpt2",
    }


@pytest.fixture
def pipeline_model(pipeline_params):
    return ModelScopePipeline(**pipeline_params)


def test_init(mocker, pipeline_params, pipeline_model):
    mock_pipeline = mocker.patch(
        "swarms.models.modelscope_pipeline.pipeline"
    )

    for param, value in pipeline_params.items():
        assert getattr(pipeline_model, param) == value

    mock_pipeline.assert_called_once_with(
        pipeline_params["type_task"],
        model=pipeline_params["model_name"],
    )


def test_run(mocker, pipeline_model):
    task = "Generate a 10,000 word blog on health and wellness."

    pipeline_model.model = MagicMock(return_value="Mocked output")

    output = pipeline_model.run(task)

    assert output is not None

@@ -0,0 +1,58 @@
import pytest
from swarms.models.modelscope_llm import ModelScopeAutoModel
from unittest.mock import MagicMock


@pytest.fixture
def model_params():
    return {
        "model_name": "gpt2",
        "tokenizer_name": None,
        "device": "cuda",
        "device_map": "auto",
        "max_new_tokens": 500,
        "skip_special_tokens": True,
    }


@pytest.fixture
def modelscope(model_params):
    return ModelScopeAutoModel(**model_params)


def test_init(mocker, model_params, modelscope):
    mock_model = mocker.patch(
        "swarms.models.modelscope_llm.AutoModelForCausalLM.from_pretrained"
    )
    mock_tokenizer = mocker.patch(
        "swarms.models.modelscope_llm.AutoTokenizer.from_pretrained"
    )

    for param, value in model_params.items():
        assert getattr(modelscope, param) == value

    mock_tokenizer.assert_called_once_with(
        model_params["tokenizer_name"]
    )
    mock_model.assert_called_once_with(
        model_params["model_name"],
        device_map=model_params["device_map"],
    )


def test_run(mocker, modelscope):
    task = "Generate a 10,000 word blog on health and wellness."

    mocker.patch(
        "swarms.models.modelscope_llm.AutoTokenizer.decode",
        return_value="Mocked output",
    )
    modelscope.model.generate = MagicMock(
        return_value=["Mocked token"]
    )
    modelscope.tokenizer = MagicMock(
        return_value={"input_ids": "Mocked input_ids"}
    )

    output = modelscope.run(task)

    assert output is not None
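Similarly for `ModelScopeAutoModel`: the mocks above (tokenizer call, `model.generate`, `AutoTokenizer.decode`) suggest a generate-then-decode flow roughly like the sketch below; the method body and parameter plumbing are assumptions, not the committed implementation:
```python
def run_sketch(self, task: str) -> str:
    # Assumed flow: tokenize the task, generate tokens, decode the first sequence.
    inputs = self.tokenizer(task, return_tensors="pt")
    tokens = self.model.generate(
        **inputs, max_new_tokens=self.max_new_tokens
    )
    return self.tokenizer.decode(
        tokens[0], skip_special_tokens=self.skip_special_tokens
    )
```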

@@ -0,0 +1,56 @@
import pytest
from swarms.structs.graph_workflow import GraphWorkflow


@pytest.fixture
def graph_workflow():
    return GraphWorkflow()


def test_init(graph_workflow):
    assert graph_workflow.graph == {}
    assert graph_workflow.entry_point is None


def test_add(graph_workflow):
    graph_workflow.add("node1", "value1")
    assert "node1" in graph_workflow.graph
    assert graph_workflow.graph["node1"]["value"] == "value1"
    assert graph_workflow.graph["node1"]["edges"] == {}


def test_set_entry_point(graph_workflow):
    graph_workflow.add("node1", "value1")
    graph_workflow.set_entry_point("node1")
    assert graph_workflow.entry_point == "node1"


def test_set_entry_point_nonexistent_node(graph_workflow):
    with pytest.raises(ValueError, match="Node does not exist in graph"):
        graph_workflow.set_entry_point("nonexistent")


def test_add_edge(graph_workflow):
    graph_workflow.add("node1", "value1")
    graph_workflow.add("node2", "value2")
    graph_workflow.add_edge("node1", "node2")
    assert "node2" in graph_workflow.graph["node1"]["edges"]


def test_add_edge_nonexistent_node(graph_workflow):
    graph_workflow.add("node1", "value1")
    with pytest.raises(ValueError, match="Node does not exist in graph"):
        graph_workflow.add_edge("node1", "nonexistent")


def test_add_conditional_edges(graph_workflow):
    graph_workflow.add("node1", "value1")
    graph_workflow.add("node2", "value2")
    graph_workflow.add_conditional_edges(
        "node1", "condition1", {"condition_value1": "node2"}
    )
    assert "node2" in graph_workflow.graph["node1"]["edges"]


def test_add_conditional_edges_nonexistent_node(graph_workflow):
    graph_workflow.add("node1", "value1")
    with pytest.raises(ValueError, match="Node does not exist in graph"):
        graph_workflow.add_conditional_edges(
            "node1", "condition1", {"condition_value1": "nonexistent"}
        )


def test_run(graph_workflow):
    graph_workflow.add("node1", "value1")
    graph_workflow.set_entry_point("node1")
    assert graph_workflow.run() == graph_workflow.graph


def test_run_no_entry_point(graph_workflow):
    with pytest.raises(ValueError, match="Entry point not set"):
        graph_workflow.run()
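Taken together, these tests outline the GraphWorkflow API added in this commit. A minimal end-to-end sketch (node names, the condition key, and its values are illustrative; the import path follows the class docstring):
```python
from swarms.structs import GraphWorkflow

workflow = GraphWorkflow()

# Nodes are name/value pairs stored in workflow.graph
workflow.add("start", "Start")
workflow.add("review", "Review input")
workflow.add("end", "End")

# Plain and conditional edges, as exercised by the tests above
workflow.add_edge("start", "review")
workflow.add_conditional_edges("review", "is_done", {"yes": "end", "no": "start"})

# run() requires an entry point and returns the graph dict
workflow.set_entry_point("start")
print(workflow.run())
```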