From 76c6a5dad200524ee6d3c2c03221a01d9e9027aa Mon Sep 17 00:00:00 2001
From: Kye
Date: Mon, 11 Dec 2023 17:11:45 -0800
Subject: [PATCH] [CODE QUALITY][Refactor Migrations][LayoutLMDocumentQA]

---
 docs/swarms/models/vllm.md            |  2 +-
 swarms/models/base_tts.py             | 18 +++++++++----
 swarms/models/layoutlm_document_qa.py | 18 ++++++++++---
 swarms/models/multion.py              | 37 ++++++++++++++++++++++-----
 swarms/models/openai_tts.py           |  5 ++--
 swarms/structs/sequential_workflow.py |  2 +-
 tests/models/test_multion.py          | 31 +++++++++++++++-------
 7 files changed, 85 insertions(+), 28 deletions(-)

diff --git a/docs/swarms/models/vllm.md b/docs/swarms/models/vllm.md
index 207e2e20..06a7c8f8 100644
--- a/docs/swarms/models/vllm.md
+++ b/docs/swarms/models/vllm.md
@@ -32,7 +32,7 @@ vLLM is designed to simplify the process of generating text using language model
 Before using vLLM, you need to install swarms. You can install vLLM using `pip`:
 
 ```bash
-pip install swarms
+pip install swarms vllm
 ```
 
 ### vLLM Class
diff --git a/swarms/models/base_tts.py b/swarms/models/base_tts.py
index 613ab19a..0faaf6ff 100644
--- a/swarms/models/base_tts.py
+++ b/swarms/models/base_tts.py
@@ -6,7 +6,7 @@ from abc import ABC, abstractmethod
 
 class BaseTTSModel(AbstractLLM):
     """Base class for all TTS models.
-    
+
     Args:
         AbstractLLM (_type_): _description_
         model_name (_type_): _description_
@@ -14,18 +14,19 @@ class BaseTTSModel(AbstractLLM):
         chunk_size (_type_): _description_
         save_to_file (bool, optional): _description_. Defaults to False.
         saved_filepath (Optional[str], optional): _description_. Defaults to None.
-    
+
     Raises:
         NotImplementedError: _description_
-    
+
     Methods:
         save: save the model to a file.
         load: load the model from a file.
         run: run the model on the given task.
         __call__: call the model on the given task.
         save_to_file: save the speech data to a file.
-    
+
     """
+
     def __init__(
         self,
         model_name,
@@ -37,6 +38,8 @@ class BaseTTSModel(AbstractLLM):
         self.model_name = model_name
         self.voice = voice
         self.chunk_size = chunk_size
+        self.save_to_file = save_to_file
+        self.saved_filepath = saved_filepath
 
     def save(self, filepath: Optional[str] = None):
         """Save the model to a file.
@@ -47,6 +50,11 @@ class BaseTTSModel(AbstractLLM):
         pass
 
     def load(self, filepath: Optional[str] = None):
+        """Load the model from a file.
+
+        Args:
+            filepath (Optional[str], optional): _description_. Defaults to None.
+        """
         pass
 
     @abstractmethod
@@ -57,7 +65,7 @@ class BaseTTSModel(AbstractLLM):
             task (str): _description_
         """
         pass
-    
+
     def __call__(self, task: str, *args, **kwargs):
         """Call the model on the given task.
 
diff --git a/swarms/models/layoutlm_document_qa.py b/swarms/models/layoutlm_document_qa.py
index 51851857..ad5b0628 100644
--- a/swarms/models/layoutlm_document_qa.py
+++ b/swarms/models/layoutlm_document_qa.py
@@ -3,9 +3,10 @@ LayoutLMDocumentQA is a multimodal good for
 visual question answering on real world docs lik invoice, pdfs, etc
 """
 from transformers import pipeline
+from swarms.models.base_multimodal_model import BaseMultiModalModel
 
 
-class LayoutLMDocumentQA:
+class LayoutLMDocumentQA(BaseMultiModalModel):
     """
     LayoutLMDocumentQA for document question answering:
 
@@ -25,13 +26,24 @@ class LayoutLMDocumentQA:
         self,
         model_name: str = "impira/layoutlm-document-qa",
         task_type: str = "document-question-answering",
+        *args,
+        **kwargs,
     ):
+        super(LayoutLMDocumentQA, self).__init__(*args, **kwargs)
         self.model_name = model_name
        self.task_type = task_type
         self.pipeline = pipeline(task_type, model=model_name)
 
-    def __call__(self, task: str, img_path: str):
-        """Call for model"""
+    def __call__(self, task: str, img_path: str, *args, **kwargs):
+        """Call the LayoutLMDocumentQA model
+
+        Args:
+            task (str): _description_
+            img_path (str): _description_
+
+        Returns:
+            _type_: _description_
+        """
         out = self.pipeline(img_path, task)
         out = str(out)
         return out
diff --git a/swarms/models/multion.py b/swarms/models/multion.py
index 93184961..14152faf 100644
--- a/swarms/models/multion.py
+++ b/swarms/models/multion.py
@@ -1,17 +1,18 @@
+from swarms.models.base_llm import AbstractLLM
+
+
 try:
     import multion
 except ImportError:
     raise ImportError(
         "Cannot import multion, please install 'pip install'"
-
     )
-from swarms.models.base_llm import AbstractLLM
 
-class MultiOn(AbstractLLM):
+
+class MultiOn(AbstractLLM):
     """
-    MultiOn is a wrapper for the Multion API. 
+    MultiOn is a wrapper for the Multion API.
 
     Args:
         **kwargs:
 
     Methods:
         run(self, task: str, url: str, *args, **kwargs)
 
-    Example: 
+    Example:
         >>> from swarms.models.multion import MultiOn
         >>> multion = MultiOn()
         >>> multion.run("Order chicken tendies", "https://www.google.com/")
         "Order chicken tendies. 
 https://www.google.com/"
-    
+
     """
+
     def __init__(self, **kwargs):
         super(MultiOn, self).__init__(**kwargs)
 
     def run(self, task: str, url: str, *args, **kwargs) -> str:
+        """Run the multion model
+
+        Args:
+            task (str): _description_
+            url (str): _description_
+
+        Returns:
+            str: _description_
+        """
         response = multion.new_session({"input": task, "url": url})
         return response
-    def generate_summary(self, task: str, url: str, *args, **kwargs) -> str:
+
+    def generate_summary(
+        self, task: str, url: str, *args, **kwargs
+    ) -> str:
+        """Generate a summary from the multion model
+
+        Args:
+            task (str): _description_
+            url (str): _description_
+
+        Returns:
+            str: _description_
+        """
         response = multion.new_session({"input": task, "url": url})
         return response
diff --git a/swarms/models/openai_tts.py b/swarms/models/openai_tts.py
index c416788d..e2746eb2 100644
--- a/swarms/models/openai_tts.py
+++ b/swarms/models/openai_tts.py
@@ -1,9 +1,10 @@
 import os
+import subprocess
 import sys
-import openai
+
 import requests
 from dotenv import load_dotenv
-import subprocess
+
 from swarms.models.base_llm import AbstractLLM
 
 try:
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
index 93165aee..49c529cb 100644
--- a/swarms/structs/sequential_workflow.py
+++ b/swarms/structs/sequential_workflow.py
@@ -361,7 +361,7 @@ class SequentialWorkflow:
         )
 
     def workflow_bootup(self, **kwargs) -> None:
-        """Boots up the workflow."""
+        """Bootup the workflow."""
         print(
             colored(
                 """
diff --git a/tests/models/test_multion.py b/tests/models/test_multion.py
index 707c0798..416e6dc3 100644
--- a/tests/models/test_multion.py
+++ b/tests/models/test_multion.py
@@ -2,40 +2,53 @@ import pytest
 from unittest.mock import Mock, patch
 from swarms.models.multion import MultiOn
 
+
 @pytest.fixture
 def multion_instance():
     return MultiOn()
 
+
 @pytest.fixture
 def mock_multion():
     return Mock()
 
+
 def test_multion_import():
     with pytest.raises(ImportError):
         import multion
 
+
 def test_multion_init():
     multion = MultiOn()
     assert isinstance(multion, MultiOn)
 
+
 def test_multion_run_with_valid_input(multion_instance, mock_multion):
     task = "Order chicken tendies"
     url = "https://www.google.com/"
-    mock_multion.new_session.return_value = "Order chicken tendies. https://www.google.com/"
-
-    with patch('swarms.models.multion.multion', mock_multion):
+    mock_multion.new_session.return_value = (
+        "Order chicken tendies. https://www.google.com/"
+    )
+
+    with patch("swarms.models.multion.multion", mock_multion):
         response = multion_instance.run(task, url)
-
-    assert response == "Order chicken tendies. https://www.google.com/"
 
-def test_multion_run_with_invalid_input(multion_instance, mock_multion):
+    assert (
+        response == "Order chicken tendies. https://www.google.com/"
+    )
+
+
+def test_multion_run_with_invalid_input(
+    multion_instance, mock_multion
+):
     task = ""
     url = "https://www.google.com/"
     mock_multion.new_session.return_value = None
-
-    with patch('swarms.models.multion.multion', mock_multion):
+
+    with patch("swarms.models.multion.multion", mock_multion):
         response = multion_instance.run(task, url)
-
+
     assert response is None
 
+
 # Add more test cases to cover different scenarios, edge cases, and error handling as needed.
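
A minimal usage sketch of the two refactored model classes, for anyone trying this patch locally. It is not part of the patch itself: it assumes the `swarms` package from this branch plus `transformers` and `multion` are installed, and the document file name and question are placeholders chosen for illustration.

```python
# Sketch only: exercises LayoutLMDocumentQA and MultiOn as refactored above.
# Assumes swarms (this branch), transformers, and multion are installed;
# "invoice.png" and the question string are hypothetical placeholders.
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
from swarms.models.multion import MultiOn

# LayoutLMDocumentQA now subclasses BaseMultiModalModel and forwards
# *args/**kwargs to the parent constructor; the default checkpoint and
# task type ("impira/layoutlm-document-qa", document-question-answering)
# are unchanged.
doc_qa = LayoutLMDocumentQA()
print(doc_qa("What is the total amount?", "invoice.png"))

# MultiOn.run() wraps multion.new_session(); generate_summary() currently
# issues the same call, so both return the raw session response.
browser = MultiOn()
print(browser.run("Order chicken tendies", "https://www.google.com/"))
```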