starting to work

pull/482/head
mike dupont 11 months ago
parent 6d7f1ef047
commit 2de1e4f991

@@ -16,6 +16,7 @@ RUN pip3 install --no-cache-dir -r requirements.txt
# Copy the current directory contents into the container at /app
COPY scripts /app/scripts
COPY swarms /app/swarms
COPY example.py /app/example.py
# Make port 80 available to the world outside this container
EXPOSE 80

@@ -1,77 +1,77 @@
from swarms.models.base_embedding_model import BaseEmbeddingModel
from swarms.models.base_llm import BaseLLM # noqa: E402
from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
from swarms.models.huggingface import HuggingfaceLLM # noqa: E402
from swarms.models.idefics import Idefics # noqa: E402
from swarms.models.kosmos_two import Kosmos # noqa: E402
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
from swarms.models.llava import LavaMultiModal # noqa: E402
#from swarms.models.base_multimodal_model import BaseMultiModalModel
#from swarms.models.fuyu import Fuyu # noqa: E402
#from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
#from swarms.models.huggingface import HuggingfaceLLM # noqa: E402
#from swarms.models.idefics import Idefics # noqa: E402
#from swarms.models.kosmos_two import Kosmos # noqa: E402
#from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
#from swarms.models.llava import LavaMultiModal # noqa: E402
from swarms.models.nougat import Nougat # noqa: E402
from swarms.models.palm import GooglePalm as Palm # noqa: E402
from swarms.models.openai_tts import OpenAITTS # noqa: E402
from swarms.models.popular_llms import Anthropic as Anthropic
from swarms.models.popular_llms import (
AzureOpenAILLM as AzureOpenAI,
)
from swarms.models.popular_llms import (
CohereChat as Cohere,
)
#from swarms.models.nougat import Nougat # noqa: E402
#from swarms.models.palm import GooglePalm as Palm # noqa: E402
#from swarms.models.openai_tts import OpenAITTS # noqa: E402
#from swarms.models.popular_llms import Anthropic as Anthropic
# from swarms.models.popular_llms import (
# AzureOpenAILLM as AzureOpenAI,
# )
# from swarms.models.popular_llms import (
# CohereChat as Cohere,
# )
from swarms.models.popular_llms import (
OpenAIChatLLM as OpenAIChat,
)
from swarms.models.popular_llms import (
OpenAILLM as OpenAI,
)
from swarms.models.popular_llms import OctoAIChat
from swarms.models.qwen import QwenVLMultiModal # noqa: E402
from swarms.models.popular_llms import ReplicateChat as Replicate
from swarms.models.sampling_params import SamplingParams, SamplingType
from swarms.models.together import TogetherLLM # noqa: E402
# from swarms.models.popular_llms import OctoAIChat
# from swarms.models.qwen import QwenVLMultiModal # noqa: E402
# from swarms.models.popular_llms import ReplicateChat as Replicate
# from swarms.models.sampling_params import SamplingParams, SamplingType
# from swarms.models.together import TogetherLLM # noqa: E402
from swarms.models.types import ( # noqa: E402
AudioModality,
ImageModality,
MultimodalData,
# AudioModality,
# ImageModality,
# MultimodalData,
TextModality,
VideoModality,
# VideoModality,
)
from swarms.models.vilt import Vilt # noqa: E402
from swarms.models.openai_embeddings import OpenAIEmbeddings
from swarms.models.llama3_hosted import llama3Hosted
#from swarms.models.vilt import Vilt # noqa: E402
#from swarms.models.openai_embeddings import OpenAIEmbeddings
#from swarms.models.llama3_hosted import llama3Hosted
__all__ = [
"BaseEmbeddingModel",
"BaseLLM",
"BaseMultiModalModel",
"Fuyu",
"GPT4VisionAPI",
"HuggingfaceLLM",
"Idefics",
"Kosmos",
"LayoutLMDocumentQA",
"LavaMultiModal",
"Nougat",
"Palm",
"OpenAITTS",
"Anthropic",
"AzureOpenAI",
"Cohere",
"OpenAIChat",
"OpenAI",
"OctoAIChat",
"QwenVLMultiModal",
"Replicate",
"SamplingParams",
"SamplingType",
"TogetherLLM",
"AudioModality",
"ImageModality",
"MultimodalData",
"TextModality",
"VideoModality",
"Vilt",
"OpenAIEmbeddings",
"llama3Hosted",
]
# __all__ = [
# "BaseEmbeddingModel",
# "BaseLLM",
# # "BaseMultiModalModel",
# # "Fuyu",
# # "GPT4VisionAPI",
# # "HuggingfaceLLM",
# # "Idefics",
# # "Kosmos",
# # "LayoutLMDocumentQA",
# # "LavaMultiModal",
# "Nougat",
# "Palm",
# "OpenAITTS",
# "Anthropic",
# "AzureOpenAI",
# "Cohere",
# "OpenAIChat",
# "OpenAI",
# "OctoAIChat",
# "QwenVLMultiModal",
# "Replicate",
# "SamplingParams",
# "SamplingType",
# "TogetherLLM",
# "AudioModality",
# "ImageModality",
# "MultimodalData",
# "TextModality",
# "VideoModality",
# "Vilt",
# "OpenAIEmbeddings",
# "llama3Hosted",
# ]
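
If the uncommented __all__ above stays in place while the imports it names are commented out, "from swarms.models import *" will raise AttributeError for the missing names. A minimal sketch of an __all__ trimmed to the imports left active in this hunk (an assumption about the intended end state, not code from this commit):

# Sketch only: __all__ limited to the names still imported above (assumed intent).
__all__ = [
    "BaseEmbeddingModel",
    "BaseLLM",
    "OpenAIChat",
    "OpenAI",
    "TextModality",
]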

@@ -718,7 +718,7 @@ class Agent(BaseStructure):
loop_count = 0
# Clear the short memory
# response = None
response = "ERROR"
while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1
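
The hunk above replaces the commented-out response = None with response = "ERROR" ahead of the loop. A minimal sketch of why pre-initializing the variable matters (the function and helper names here are hypothetical, not the Agent API):

def run_with_loops(call_llm, task, max_loops=3):
    # Pre-initializing response (as the hunk does with "ERROR") guarantees the
    # function has something to return even if the loop body raises on the
    # first iteration or max_loops is 0.
    response = "ERROR"
    loop_count = 0
    while loop_count < max_loops:
        loop_count += 1
        try:
            response = call_llm(task)  # hypothetical stand-in for the agent's LLM call
        except Exception:
            break
    return response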

@@ -1,4 +1,4 @@
import torch
#import torch
from transformers import (
LogitsWarper,
PreTrainedTokenizer,
@@ -13,7 +13,7 @@ class StringStoppingCriteria(StoppingCriteria):
def __call__(
self,
input_ids: torch.LongTensor,
input_ids,
_,
) -> bool:
if len(input_ids[0]) <= self.prompt_length:
@@ -42,8 +42,8 @@ class NumberStoppingCriteria(StoppingCriteria):
def __call__(
self,
input_ids: torch.LongTensor,
scores: torch.FloatTensor,
input_ids,
scores,
) -> bool:
decoded = self.tokenizer.decode(
input_ids[0][self.prompt_length :],
@@ -74,7 +74,7 @@ class OutputNumbersTokens(LogitsWarper):
self.tokenizer = tokenizer
self.tokenized_prompt = tokenizer(prompt, return_tensors="pt")
vocab_size = len(tokenizer)
self.allowed_mask = torch.zeros(vocab_size, dtype=torch.bool)
#self.allowed_mask = torch.zeros(vocab_size, dtype=torch.bool)
for _, token_id in tokenizer.get_vocab().items():
token_str = tokenizer.decode(token_id).strip()
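
This hunk drops the torch type hints and comments out the allowed_mask initialization, apparently so the module can be imported when torch is not installed. If other code in OutputNumbersTokens still reads self.allowed_mask, one way to keep it working is to import torch lazily. A minimal sketch of that pattern (an assumption about intent, not code from this commit; the digit check is illustrative):

def build_allowed_mask(tokenizer):
    # Import torch only when the mask is actually built, so importing this
    # module does not require torch to be installed.
    import torch

    vocab_size = len(tokenizer)
    allowed_mask = torch.zeros(vocab_size, dtype=torch.bool)
    for _, token_id in tokenizer.get_vocab().items():
        token_str = tokenizer.decode(token_id).strip()
        # Illustrative check: keep tokens that look like pieces of a number.
        if token_str and all(c.isdigit() or c == "." for c in token_str):
            allowed_mask[token_id] = True
    return allowed_mask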
