[FEATS] [ModelScopeAutoModel] [ModelScopePipeline]

pull/346/head
Kye 1 year ago
parent 91c023c908
commit 24811bb9a2

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
-version = "3.4.4"
version = "3.4.7"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@@ -1,4 +1,4 @@
-# LLMs
############################################ LLMs
from swarms.models.base_llm import AbstractLLM # noqa: E402
from swarms.models.anthropic import Anthropic # noqa: E402
from swarms.models.petals import Petals # noqa: E402
@@ -18,6 +18,10 @@ from swarms.models.wizard_storytelling import (
) # noqa: E402
from swarms.models.mpt import MPT7B # noqa: E402
from swarms.models.mixtral import Mixtral # noqa: E402
from swarms.models.modelscope_pipeline import ModelScopePipeline
from swarms.models.modelscope_llm import (
    ModelScopeAutoModel,
) # noqa: E402
################# MultiModal Models
from swarms.models.base_multimodal_model import (
@@ -35,12 +39,12 @@ from swarms.models.gemini import Gemini # noqa: E402
from swarms.models.gigabind import Gigabind # noqa: E402
from swarms.models.zeroscope import ZeroscopeTTV # noqa: E402
# from swarms.models.gpt4v import GPT4Vision
# from swarms.models.dalle3 import Dalle3
# from swarms.models.distilled_whisperx import DistilWhisperModel # noqa: E402
# from swarms.models.whisperx_model import WhisperX # noqa: E402
# from swarms.models.kosmos_two import Kosmos # noqa: E402
-# from swarms.models.cog_agent import CogAgent # noqa: E402
from swarms.models.cog_agent import CogAgent # noqa: E402
from swarms.models.types import (
    TextModality,
@@ -84,5 +88,7 @@ __all__ = [
    "AudioModality",
    "VideoModality",
    "MultimodalData",
-    # "CogAgent"
    "CogAgent",
    "ModelScopePipeline",
    "ModelScopeAutoModel",
]

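With the __init__.py changes above, both new classes are exported at the package root. A quick import check (a sketch, assuming swarms 3.4.7 is installed):

# Confirm the new exports resolve from the package root.
from swarms.models import ModelScopeAutoModel, ModelScopePipeline

print(ModelScopeAutoModel.__name__, ModelScopePipeline.__name__)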
@@ -0,0 +1,83 @@
from typing import Optional

from modelscope import AutoModelForCausalLM, AutoTokenizer

from swarms.models.base_llm import AbstractLLM


class ModelScopeAutoModel(AbstractLLM):
    """
    ModelScopeAutoModel is a class that represents a model for generating text using the ModelScope framework.

    Args:
        model_name (str): The name or path of the pre-trained model.
        tokenizer_name (str, optional): The name or path of the tokenizer to use. Defaults to None, in which case model_name is used.
        device (str, optional): The device to use for model inference. Defaults to "cuda".
        device_map (str, optional): The device mapping for multi-GPU setups. Defaults to "auto".
        max_new_tokens (int, optional): The maximum number of new tokens to generate. Defaults to 500.
        skip_special_tokens (bool, optional): Whether to skip special tokens during decoding. Defaults to True.
        *args: Additional positional arguments.
        **kwargs: Additional keyword arguments.

    Attributes:
        tokenizer (AutoTokenizer): The tokenizer used for tokenizing input text.
        model (AutoModelForCausalLM): The pre-trained model for generating text.

    Methods:
        run(task, *args, **kwargs): Generates text based on the given task.

    Examples:
    >>> from swarms.models import ModelScopeAutoModel
    >>> mp = ModelScopeAutoModel(
    ...     model_name="gpt2",
    ... )
    >>> mp.run("Generate a 10,000 word blog on health and wellness.")
    """

    def __init__(
        self,
        model_name: str,
        tokenizer_name: Optional[str] = None,
        device: str = "cuda",
        device_map: str = "auto",
        max_new_tokens: int = 500,
        skip_special_tokens: bool = True,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.model_name = model_name
        self.tokenizer_name = tokenizer_name
        self.device = device
        self.device_map = device_map
        self.max_new_tokens = max_new_tokens
        self.skip_special_tokens = skip_special_tokens

        # Fall back to the model name so tokenizer_name=None does not
        # break AutoTokenizer.from_pretrained.
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.tokenizer_name or self.model_name
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name, *args, device_map=device_map, **kwargs
        )

    def run(self, task: str, *args, **kwargs):
        """
        Run the model on the given task.

        Parameters:
            task (str): The input task to be processed.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The generated output from the model.
        """
        # Tokenize the prompt and move it to the configured device so the
        # inputs match the model's placement.
        inputs = self.tokenizer(task, return_tensors="pt").to(self.device)

        outputs = self.model.generate(
            **inputs, max_new_tokens=self.max_new_tokens, **kwargs
        )

        return self.tokenizer.decode(
            outputs[0], skip_special_tokens=self.skip_special_tokens
        )

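A minimal usage sketch for the class above (assumes modelscope is installed and a CUDA device is available; "gpt2" is only a stand-in for any causal LM that ModelScope can resolve):

from swarms.models.modelscope_llm import ModelScopeAutoModel

# tokenizer_name is omitted, so the tokenizer falls back to the model name.
model = ModelScopeAutoModel(model_name="gpt2", max_new_tokens=128)
print(model.run("Write a short note on model parallelism."))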
@@ -0,0 +1,58 @@
from modelscope.pipelines import pipeline

from swarms.models.base_llm import AbstractLLM


class ModelScopePipeline(AbstractLLM):
    """
    A class representing a ModelScope pipeline.

    Args:
        type_task (str): The type of task for the pipeline.
        model_name (str): The name of the model for the pipeline.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Attributes:
        type_task (str): The type of task for the pipeline.
        model_name (str): The name of the model for the pipeline.
        model: The pipeline model.

    Methods:
        run: Runs the pipeline for a given task.

    Examples:
    >>> from swarms.models import ModelScopePipeline
    >>> mp = ModelScopePipeline(
    ...     type_task="text-generation",
    ...     model_name="gpt2",
    ... )
    >>> mp.run("Generate a 10,000 word blog on health and wellness.")
    """

    def __init__(
        self, type_task: str, model_name: str, *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.type_task = type_task
        self.model_name = model_name

        self.model = pipeline(
            self.type_task, *args, model=self.model_name, **kwargs
        )

    def run(self, task: str, *args, **kwargs):
        """
        Runs the pipeline for a given task.

        Args:
            task (str): The task to be performed by the pipeline.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            The result of running the pipeline on the given task.
        """
        return self.model(task, *args, **kwargs)

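And a matching sketch for the pipeline wrapper (the task string follows ModelScope's pipeline task names; "text-generation" and "gpt2" are assumptions for illustration):

from swarms.models.modelscope_pipeline import ModelScopePipeline

mp = ModelScopePipeline(type_task="text-generation", model_name="gpt2")
print(mp.run("Summarize the benefits of retrieval augmentation."))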
@@ -26,6 +26,11 @@ class BlocksDict(BaseStructure):
        add(key: str, block: Any): Add a block to the dictionary.
        remove(key: str): Remove a block from the dictionary.
        get(key: str): Get a block from the dictionary.
        update(key: str, block: Any): Update a block in the dictionary.
        keys(): Get a list of keys in the dictionary.
        values(): Get a list of values in the dictionary.
        items(): Get a list of key-value pairs in the dictionary.
        clear(): Clear all blocks from the dictionary.
    """

    def __init__(
@@ -41,25 +46,75 @@ class BlocksDict(BaseStructure):
        self.blocks = blocks

    def add(self, key: str, block: Any):
        """
        Add a block to the dictionary.

        Args:
            key (str): The key of the block.
            block (Any): The block to be added.
        """
        self.blocks[key] = block

    def remove(self, key: str):
        """
        Remove a block from the dictionary.

        Args:
            key (str): The key of the block to be removed.
        """
        del self.blocks[key]

    def get(self, key: str):
        """
        Get a block from the dictionary.

        Args:
            key (str): The key of the block to be retrieved.

        Returns:
            Any: The retrieved block.
        """
        return self.blocks.get(key)

    def update(self, key: str, block: Any):
        """
        Update a block in the dictionary.

        Args:
            key (str): The key of the block to be updated.
            block (Any): The updated block.
        """
        self.blocks[key] = block

    def keys(self):
        """
        Get a list of keys in the dictionary.

        Returns:
            List[str]: A list of keys.
        """
        return list(self.blocks.keys())

    def values(self):
        """
        Get a list of values in the dictionary.

        Returns:
            List[Any]: A list of values.
        """
        return list(self.blocks.values())

    def items(self):
        """
        Get a list of key-value pairs in the dictionary.

        Returns:
            List[Tuple[str, Any]]: A list of key-value pairs.
        """
        return list(self.blocks.items())

    def clear(self):
        """
        Clear all blocks from the dictionary.
        """
        self.blocks.clear()

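With these additions BlocksDict mirrors the full dict interface. A short sketch of the new methods (assuming, per the diff, that the constructor accepts a blocks mapping; the stored values here are arbitrary placeholders):

registry = BlocksDict(blocks={})
registry.add("tokenizer", "any object")
registry.update("tokenizer", "a replacement object")
print(registry.keys())   # ['tokenizer']
print(registry.items())  # [('tokenizer', 'a replacement object')]
registry.clear()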
@@ -9,20 +9,60 @@ from swarms.structs.base import BaseStructure
class Conversation(BaseStructure):
    """
    Conversation class
    A structure that represents a conversation in a chatbot and stores the
    conversation history. It can save the history to a file, load it from a
    file, and display it. It can also persist the history to a database and
    add, query, update, delete, or retrieve it there.

-    Attributes:
-        time_enabled (bool): whether to enable time
-        conversation_history (list): list of messages in the conversation
    Args:
        time_enabled (bool, optional): Whether to enable time. Defaults to False.
        database (AbstractDatabase, optional): The database to use. Defaults to None.
        autosave (bool, optional): Whether to autosave. Defaults to True.
        save_filepath (str, optional): The filepath to save to. Defaults to "runs/conversation.json".
        *args: Additional arguments.
        **kwargs: Additional keyword arguments.

    Attributes:
        time_enabled (bool): Whether to enable time.
        database (AbstractDatabase): The database to use.
        autosave (bool): Whether to autosave.
        save_filepath (str): The filepath to save to.
        conversation_history (list): The conversation history.

    Methods:
        add(role: str, content: str): Add a message to the conversation history.
        delete(index: str): Delete a message from the conversation history.
        update(index: str, role, content): Update a message in the conversation history.
        query(index: str): Query a message in the conversation history.
        search(keyword: str): Search for a message in the conversation history.
        display_conversation(detailed: bool = False): Display the conversation history.
        export_conversation(filename: str): Export the conversation history to a file.
        import_conversation(filename: str): Import a conversation history from a file.
        count_messages_by_role(): Count the number of messages by role.
        return_history_as_string(): Return the conversation history as a string.
        save_as_json(filename: str): Save the conversation history as a JSON file.
        load_from_json(filename: str): Load the conversation history from a JSON file.
        search_keyword_in_conversation(keyword: str): Search for a keyword in the conversation history.
        pretty_print_conversation(messages): Pretty print the conversation history.
        add_to_database(): Add the conversation history to the database.
        query_from_database(query): Query the conversation history from the database.
        delete_from_database(): Delete the conversation history from the database.
        update_from_database(): Update the conversation history from the database.
        get_from_database(): Get the conversation history from the database.
        execute_query_from_database(query): Execute a query on the database.
        fetch_all_from_database(): Fetch all records from the database.
        fetch_one_from_database(): Fetch one record from the database.

    Examples:
-    >>> conv = Conversation()
-    >>> conv.add("user", "Hello, world!")
-    >>> conv.add("assistant", "Hello, user!")
-    >>> conv.display_conversation()
-    user: Hello, world!
    >>> from swarms import Conversation
    >>> conversation = Conversation()
    >>> conversation.add("user", "Hello, how are you?")
    >>> conversation.add("assistant", "I am doing well, thanks.")
    >>> conversation.display_conversation()
    user: Hello, how are you?
    assistant: I am doing well, thanks.
    """

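Beyond the doctest above, the save/load round trip listed in the Methods section might look like this (the filepath is illustrative):

conversation = Conversation(time_enabled=True)
conversation.add("user", "What changed in 3.4.7?")
conversation.save_as_json("runs/conversation.json")

restored = Conversation()
restored.load_from_json("runs/conversation.json")
print(restored.return_history_as_string())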
@@ -13,28 +13,39 @@ logger = logging.getLogger(__name__)
class ModelParallelizer:
    """
    ModelParallelizer
-    -----
-    Architecture:
-    How it works:
-    1. ModelParallelizer receives a task from the user.
-    2. ModelParallelizer distributes the task to all LLMs.
-    3. ModelParallelizer collects the responses from all LLMs.
-    4. ModelParallelizer prints the responses from all LLMs.
-    Parameters:
-    llms: list of LLMs
-    Methods:
-    run(task): distribute task to all LLMs and collect responses
-    print_responses(task): print responses from all LLMs
-    Usage:
-    parallelizer = ModelParallelizer(llms)
-    parallelizer.run(task)
-    parallelizer.print_responses(task)
    ModelParallelizer, a class that parallelizes the execution of a task
    across multiple language models. It is a wrapper around the
    LanguageModel class.

    Args:
        llms (List[Callable]): A list of language models.
        retry_attempts (int): The number of retry attempts.
        iters (int): The number of iterations to run the task.

    Attributes:
        llms (List[Callable]): A list of language models.
        retry_attempts (int): The number of retry attempts.
        iters (int): The number of iterations to run the task.
        last_responses (List[str]): The last responses from the language
            models.
        task_history (List[str]): The task history.

    Examples:
    >>> from swarms.structs import ModelParallelizer
    >>> from swarms.llms import OpenAIChat
    >>> llms = [
    ...     OpenAIChat(
    ...         temperature=0.5,
    ...         openai_api_key="OPENAI_API_KEY",
    ...     ),
    ...     OpenAIChat(
    ...         temperature=0.5,
    ...         openai_api_key="OPENAI_API_KEY",
    ...     ),
    ... ]
    >>> mp = ModelParallelizer(llms)
    >>> mp.run("Generate a 10,000 word blog on health and wellness.")
    ['Generate a 10,000 word blog on health and wellness.', 'Generate a 10,000 word blog on health and wellness.']
    """

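Conceptually, run() fans one task out to every LLM and gathers the replies. A minimal sketch of that fan-out pattern, not the class's actual implementation, using a thread pool:

from concurrent.futures import ThreadPoolExecutor

def fan_out(llms, task):
    # Call every model with the same task and keep responses in input order.
    with ThreadPoolExecutor(max_workers=len(llms)) as pool:
        return list(pool.map(lambda llm: llm(task), llms))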
@@ -54,7 +54,7 @@ class SwarmNetwork(BaseStructure):
        agents (List[Agent]): A list of agents in the pool.
        api_enabled (bool): A flag to enable/disable the API.
        logging_enabled (bool): A flag to enable/disable logging.

    Example:
    >>> from swarms.structs.agent import Agent
    >>> from swarms.structs.swarm_net import SwarmNetwork
@@ -200,14 +200,11 @@ class SwarmNetwork(BaseStructure):
        """
        self.logger.info("[Listing all active agents]")
        num_agents = len(self.agents)

-        self.logger.info(
-            f"[Number of active agents: {num_agents}]"
-        )
        self.logger.info(f"[Number of active agents: {num_agents}]")

        try:
            for agent in self.agents:
                return self.logger.info(
                    f"[Agent] [ID: {agent.id}] [Name:"
                    f" {agent.agent_name}] [Description:"
@@ -40,5 +40,3 @@ def check_for_update():
    return version.parse(latest_version) > version.parse(
        current_version
    )

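The comparison uses packaging.version rather than raw string comparison, so multi-digit components order correctly. For example:

from packaging import version

assert version.parse("3.4.7") > version.parse("3.4.4")
assert version.parse("3.10.0") > version.parse("3.4.7")  # a string comparison would get this wrong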