[CODE QUALITY][Refactor Migrations][LayoutLMDocumentQA]

pull/293/head
Kye 1 year ago
parent 829538f46a
commit 76c6a5dad2

@ -32,7 +32,7 @@ vLLM is designed to simplify the process of generating text using language model
Before using vLLM, you need to install swarms. You can install vLLM using `pip`:
```bash
pip install swarms
pip install swarms vllm
```
### vLLM Class <a name="vllm-class"></a>

@ -6,7 +6,7 @@ from abc import ABC, abstractmethod
class BaseTTSModel(AbstractLLM):
"""Base class for all TTS models.
Args:
AbstractLLM (_type_): _description_
model_name (_type_): _description_
@ -14,18 +14,19 @@ class BaseTTSModel(AbstractLLM):
chunk_size (_type_): _description_
save_to_file (bool, optional): _description_. Defaults to False.
saved_filepath (Optional[str], optional): _description_. Defaults to None.
Raises:
NotImplementedError: _description_
Methods:
save: save the model to a file.
load: load the model from a file.
run: run the model on the given task.
__call__: call the model on the given task.
save_to_file: save the speech data to a file.
"""
def __init__(
self,
model_name,
@ -37,6 +38,8 @@ class BaseTTSModel(AbstractLLM):
self.model_name = model_name
self.voice = voice
self.chunk_size = chunk_size
self.save_to_file = save_to_file
self.saved_filepath = saved_filepath
def save(self, filepath: Optional[str] = None):
"""Save the model to a file.
@ -47,6 +50,11 @@ class BaseTTSModel(AbstractLLM):
pass
def load(self, filepath: Optional[str] = None):
"""Load the model from a file.
Args:
filepath (Optional[str], optional): _description_. Defaults to None.
"""
pass
@abstractmethod
@ -57,7 +65,7 @@ class BaseTTSModel(AbstractLLM):
task (str): _description_
"""
pass
def __call__(self, task: str, *args, **kwargs):
"""Call the model on the given task.

@ -3,9 +3,10 @@ LayoutLMDocumentQA is a multimodal model good for
visual question answering on real-world documents like invoices, PDFs, etc.
"""
from transformers import pipeline
from swarms.models.base_multimodal_model import BaseMultiModalModel
class LayoutLMDocumentQA:
class LayoutLMDocumentQA(BaseMultiModalModel):
"""
LayoutLMDocumentQA for document question answering:
@ -25,13 +26,24 @@ class LayoutLMDocumentQA:
self,
model_name: str = "impira/layoutlm-document-qa",
task_type: str = "document-question-answering",
*args,
**kwargs,
):
super(LayoutLMDocumentQA, self).__init__(*args, **kwargs)
self.model_name = model_name
self.task_type = task_type
self.pipeline = pipeline(task_type, model=model_name)
def __call__(self, task: str, img_path: str):
"""Call for model"""
def __call__(self, task: str, img_path: str, *args, **kwargs):
"""Call the LayoutLMDocumentQA model
Args:
task (str): _description_
img_path (str): _description_
Returns:
_type_: _description_
"""
out = self.pipeline(img_path, task)
out = str(out)
return out

@ -1,17 +1,18 @@
from swarms.models.base_llm import AbstractLLM
try:
import multion
except ImportError:
raise ImportError(
"Cannot import multion, please install 'pip install'"
)
from swarms.models.base_llm import AbstractLLM
class MultiOn(AbstractLLM):
class MultiOn(AbstractLLM):
"""
MultiOn is a wrapper for the Multion API.
MultiOn is a wrapper for the Multion API.
Args:
**kwargs:
@ -19,19 +20,41 @@ class MultiOn(AbstractLLM):
Methods:
run(self, task: str, url: str, *args, **kwargs)
Example:
Example:
>>> from swarms.models.multion import MultiOn
>>> multion = MultiOn()
>>> multion.run("Order chicken tendies", "https://www.google.com/")
"Order chicken tendies. https://www.google.com/"
"""
def __init__(self, **kwargs):
super(MultiOn, self).__init__(**kwargs)
def run(self, task: str, url: str, *args, **kwargs) -> str:
"""Run the multion model
Args:
task (str): _description_
url (str): _description_
Returns:
str: _description_
"""
response = multion.new_session({"input": task, "url": url})
return response
def generate_summary(self, task: str, url: str, *args, **kwargs) -> str:
def generate_summary(
self, task: str, url: str, *args, **kwargs
) -> str:
"""Generate a summary from the multion model
Args:
task (str): _description_
url (str): _description_
Returns:
str: _description_
"""
response = multion.new_session({"input": task, "url": url})
return response

@ -1,9 +1,10 @@
import os
import subprocess
import sys
import openai
import requests
from dotenv import load_dotenv
import subprocess
from swarms.models.base_llm import AbstractLLM
try:

@ -361,7 +361,7 @@ class SequentialWorkflow:
)
def workflow_bootup(self, **kwargs) -> None:
"""Boots up the workflow."""
"""Bootup the workflow."""
print(
colored(
"""

@ -2,40 +2,53 @@ import pytest
from unittest.mock import Mock, patch
from swarms.models.multion import MultiOn
@pytest.fixture
def multion_instance():
return MultiOn()
@pytest.fixture
def mock_multion():
return Mock()
def test_multion_import():
with pytest.raises(ImportError):
import multion
def test_multion_init():
multion = MultiOn()
assert isinstance(multion, MultiOn)
def test_multion_run_with_valid_input(multion_instance, mock_multion):
task = "Order chicken tendies"
url = "https://www.google.com/"
mock_multion.new_session.return_value = "Order chicken tendies. https://www.google.com/"
with patch('swarms.models.multion.multion', mock_multion):
mock_multion.new_session.return_value = (
"Order chicken tendies. https://www.google.com/"
)
with patch("swarms.models.multion.multion", mock_multion):
response = multion_instance.run(task, url)
assert response == "Order chicken tendies. https://www.google.com/"
def test_multion_run_with_invalid_input(multion_instance, mock_multion):
assert (
response == "Order chicken tendies. https://www.google.com/"
)
def test_multion_run_with_invalid_input(
multion_instance, mock_multion
):
task = ""
url = "https://www.google.com/"
mock_multion.new_session.return_value = None
with patch('swarms.models.multion.multion', mock_multion):
with patch("swarms.models.multion.multion", mock_multion):
response = multion_instance.run(task, url)
assert response is None
# Add more test cases to cover different scenarios, edge cases, and error handling as needed.

Loading…
Cancel
Save