starting to work

pull/482/head
mike dupont 1 year ago
parent 6d7f1ef047
commit 2de1e4f991

@@ -16,6 +16,7 @@ RUN pip3 install --no-cache-dir -r requirements.txt
 # Copy the current directory contents into the container at /app
 COPY scripts /app/scripts
 COPY swarms /app/swarms
+COPY example.py /app/example.py
 # Make port 80 available to the world outside this container
 EXPOSE 80

@@ -1,77 +1,77 @@
 from swarms.models.base_embedding_model import BaseEmbeddingModel
 from swarms.models.base_llm import BaseLLM  # noqa: E402
-from swarms.models.base_multimodal_model import BaseMultiModalModel
-from swarms.models.fuyu import Fuyu  # noqa: E402
-from swarms.models.gpt4_vision_api import GPT4VisionAPI  # noqa: E402
-from swarms.models.huggingface import HuggingfaceLLM  # noqa: E402
-from swarms.models.idefics import Idefics  # noqa: E402
-from swarms.models.kosmos_two import Kosmos  # noqa: E402
-from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
-from swarms.models.llava import LavaMultiModal  # noqa: E402
-from swarms.models.nougat import Nougat  # noqa: E402
-from swarms.models.palm import GooglePalm as Palm  # noqa: E402
-from swarms.models.openai_tts import OpenAITTS  # noqa: E402
-from swarms.models.popular_llms import Anthropic as Anthropic
-from swarms.models.popular_llms import (
-    AzureOpenAILLM as AzureOpenAI,
-)
-from swarms.models.popular_llms import (
-    CohereChat as Cohere,
-)
+#from swarms.models.base_multimodal_model import BaseMultiModalModel
+#from swarms.models.fuyu import Fuyu  # noqa: E402
+#from swarms.models.gpt4_vision_api import GPT4VisionAPI  # noqa: E402
+#from swarms.models.huggingface import HuggingfaceLLM  # noqa: E402
+#from swarms.models.idefics import Idefics  # noqa: E402
+#from swarms.models.kosmos_two import Kosmos  # noqa: E402
+#from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
+#from swarms.models.llava import LavaMultiModal  # noqa: E402
+#from swarms.models.nougat import Nougat  # noqa: E402
+#from swarms.models.palm import GooglePalm as Palm  # noqa: E402
+#from swarms.models.openai_tts import OpenAITTS  # noqa: E402
+#from swarms.models.popular_llms import Anthropic as Anthropic
+# from swarms.models.popular_llms import (
+#     AzureOpenAILLM as AzureOpenAI,
+# )
+# from swarms.models.popular_llms import (
+#     CohereChat as Cohere,
+# )
 from swarms.models.popular_llms import (
     OpenAIChatLLM as OpenAIChat,
 )
 from swarms.models.popular_llms import (
     OpenAILLM as OpenAI,
 )
-from swarms.models.popular_llms import OctoAIChat
-from swarms.models.qwen import QwenVLMultiModal  # noqa: E402
-from swarms.models.popular_llms import ReplicateChat as Replicate
-from swarms.models.sampling_params import SamplingParams, SamplingType
-from swarms.models.together import TogetherLLM  # noqa: E402
+# from swarms.models.popular_llms import OctoAIChat
+# from swarms.models.qwen import QwenVLMultiModal  # noqa: E402
+# from swarms.models.popular_llms import ReplicateChat as Replicate
+# from swarms.models.sampling_params import SamplingParams, SamplingType
+# from swarms.models.together import TogetherLLM  # noqa: E402
 from swarms.models.types import (  # noqa: E402
-    AudioModality,
-    ImageModality,
-    MultimodalData,
+    # AudioModality,
+    # ImageModality,
+    # MultimodalData,
     TextModality,
-    VideoModality,
+    # VideoModality,
 )
-from swarms.models.vilt import Vilt  # noqa: E402
-from swarms.models.openai_embeddings import OpenAIEmbeddings
-from swarms.models.llama3_hosted import llama3Hosted
-__all__ = [
-    "BaseEmbeddingModel",
-    "BaseLLM",
-    "BaseMultiModalModel",
-    "Fuyu",
-    "GPT4VisionAPI",
-    "HuggingfaceLLM",
-    "Idefics",
-    "Kosmos",
-    "LayoutLMDocumentQA",
-    "LavaMultiModal",
-    "Nougat",
-    "Palm",
-    "OpenAITTS",
-    "Anthropic",
-    "AzureOpenAI",
-    "Cohere",
-    "OpenAIChat",
-    "OpenAI",
-    "OctoAIChat",
-    "QwenVLMultiModal",
-    "Replicate",
-    "SamplingParams",
-    "SamplingType",
-    "TogetherLLM",
-    "AudioModality",
-    "ImageModality",
-    "MultimodalData",
-    "TextModality",
-    "VideoModality",
-    "Vilt",
-    "OpenAIEmbeddings",
-    "llama3Hosted",
-]
+#from swarms.models.vilt import Vilt  # noqa: E402
+#from swarms.models.openai_embeddings import OpenAIEmbeddings
+#from swarms.models.llama3_hosted import llama3Hosted
+# __all__ = [
+#     "BaseEmbeddingModel",
+#     "BaseLLM",
+#     # "BaseMultiModalModel",
+#     # "Fuyu",
+#     # "GPT4VisionAPI",
+#     # "HuggingfaceLLM",
+#     # "Idefics",
+#     # "Kosmos",
+#     # "LayoutLMDocumentQA",
+#     # "LavaMultiModal",
+#     "Nougat",
+#     "Palm",
+#     "OpenAITTS",
+#     "Anthropic",
+#     "AzureOpenAI",
+#     "Cohere",
+#     "OpenAIChat",
+#     "OpenAI",
+#     "OctoAIChat",
+#     "QwenVLMultiModal",
+#     "Replicate",
+#     "SamplingParams",
+#     "SamplingType",
+#     "TogetherLLM",
+#     "AudioModality",
+#     "ImageModality",
+#     "MultimodalData",
+#     "TextModality",
+#     "VideoModality",
+#     "Vilt",
+#     "OpenAIEmbeddings",
+#     "llama3Hosted",
+# ]
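
Note: this hunk comments out most of the model imports and the entire __all__ list; only BaseEmbeddingModel, BaseLLM, the OpenAIChat/OpenAI wrappers, and TextModality are still imported at package level. A rough sketch of the import surface left behind (illustrative only, not part of the commit):

# Rough sketch, assuming the trimmed swarms.models still exposes the uncommented names.
from swarms.models import BaseEmbeddingModel, BaseLLM, OpenAI, OpenAIChat, TextModality

llm = OpenAIChat()  # hypothetical default construction; typically needs OPENAI_API_KEY set
# The commented-out names (Anthropic, Fuyu, Idefics, TogetherLLM, ...) can no longer be imported.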

@@ -718,7 +718,7 @@ class Agent(BaseStructure):
             loop_count = 0
             # Clear the short memory
-            # response = None
+            response = "ERROR"
             while self.max_loops == "auto" or loop_count < self.max_loops:
                 loop_count += 1
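
Note: the previously commented-out response = None is replaced with response = "ERROR", so the variable is bound even if the while loop body never assigns it. A standalone illustration of the failure mode this avoids (not the Agent code itself):

# Standalone sketch of why `response` is pre-initialized before the loop.
def run(max_loops: int) -> str:
    response = "ERROR"  # sentinel default, mirroring the change above
    loop_count = 0
    while loop_count < max_loops:
        loop_count += 1
        response = f"completed loop {loop_count}"
    return response  # without the sentinel, max_loops == 0 would raise UnboundLocalError

print(run(0))  # -> "ERROR"
print(run(2))  # -> "completed loop 2"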

@@ -1,4 +1,4 @@
-import torch
+#import torch
 from transformers import (
     LogitsWarper,
     PreTrainedTokenizer,
@@ -13,7 +13,7 @@ class StringStoppingCriteria(StoppingCriteria):
     def __call__(
         self,
-        input_ids: torch.LongTensor,
+        input_ids,
         _,
     ) -> bool:
         if len(input_ids[0]) <= self.prompt_length:
@@ -42,8 +42,8 @@ class NumberStoppingCriteria(StoppingCriteria):
     def __call__(
         self,
-        input_ids: torch.LongTensor,
-        scores: torch.FloatTensor,
+        input_ids,
+        scores,
     ) -> bool:
         decoded = self.tokenizer.decode(
             input_ids[0][self.prompt_length :],
@@ -74,7 +74,7 @@ class OutputNumbersTokens(LogitsWarper):
         self.tokenizer = tokenizer
         self.tokenized_prompt = tokenizer(prompt, return_tensors="pt")
         vocab_size = len(tokenizer)
-        self.allowed_mask = torch.zeros(vocab_size, dtype=torch.bool)
+        #self.allowed_mask = torch.zeros(vocab_size, dtype=torch.bool)
         for _, token_id in tokenizer.get_vocab().items():
             token_str = tokenizer.decode(token_id).strip()
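
Note: the hunks above comment out the module-level torch import and drop the torch type annotations, so the file can be imported without torch installed; tokenizer(prompt, return_tensors="pt") still requires torch at call time. For context, classes like these hook into Hugging Face generation through StoppingCriteriaList; a self-contained sketch of the pattern (StopOnQuote is an illustrative stand-in, not a class from this file):

# Self-contained sketch: a StoppingCriteria subclass without torch annotations,
# wired into model.generate() via StoppingCriteriaList.
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
)

class StopOnQuote(StoppingCriteria):
    """Stop generation once a closing quote appears after the prompt."""

    def __init__(self, tokenizer, prompt_length):
        self.tokenizer = tokenizer
        self.prompt_length = prompt_length

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        generated = self.tokenizer.decode(input_ids[0][self.prompt_length:])
        return '"' in generated

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = '{"name": "'
inputs = tokenizer(prompt, return_tensors="pt")
prompt_length = inputs["input_ids"].shape[1]

stopping = StoppingCriteriaList([StopOnQuote(tokenizer, prompt_length)])
output = model.generate(**inputs, stopping_criteria=stopping, max_new_tokens=16, do_sample=False)
print(tokenizer.decode(output[0][prompt_length:]))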
