[removal][vllm]

pull/343/head
Kye 1 year ago
parent e8ca14f071
commit 1f1135bb7d

pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "3.2.8"
+version = "3.3.4"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -24,7 +24,7 @@ classifiers = [
 [tool.poetry.dependencies]
 python = "^3.6.1"
 torch = "2.1.1"
-transformers = "4.35.0"
+transformers = "4.36.2"
 openai = "0.28.0"
 langchain = "0.0.333"
 asyncio = "3.4.3"
@@ -37,7 +37,7 @@ opencv-python-headless = "4.8.1.78"
 faiss-cpu = "1.7.4"
 backoff = "2.2.1"
 marshmallow = "3.19.0"
-datasets = "2.10.1"
+datasets = "*"
 optimum = "1.15.0"
 diffusers = "*"
 PyPDF2 = "3.0.1"
@@ -53,7 +53,7 @@ ggl = "1.1.0"
 ratelimit = "2.2.1"
 beautifulsoup4 = "4.11.2"
 cohere = "4.24"
-huggingface-hub = "0.16.4"
+huggingface-hub = "*"
 pydantic = "1.10.12"
 tenacity = "8.2.2"
 Pillow = "9.4.0"
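The pyproject.toml changes above bump the package from 3.2.8 to 3.3.4, move the exact `transformers` pin from 4.35.0 to 4.36.2, and loosen `datasets` and `huggingface-hub` from exact pins to `"*"` (any version). A minimal sketch of what these three Poetry constraint shapes mean in PEP 440 terms, using the `packaging` library; the specifier translations follow Poetry's documented semantics, and the variable names are purely illustrative:

```python
# Minimal sketch: the Poetry constraints above expressed as PEP 440 specifier sets.
# Assumes the `packaging` library is installed (pip itself vendors it).
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# Poetry constraint -> equivalent PEP 440 specifier set:
#   transformers = "4.36.2"  -> ==4.36.2         (exact pin)
#   python = "^3.6.1"        -> >=3.6.1,<4.0.0   (caret: compatible within the major version)
#   datasets = "*"           -> no constraint    (any published version)
exact = SpecifierSet("==4.36.2")
caret = SpecifierSet(">=3.6.1,<4.0.0")
anything = SpecifierSet("")

print(Version("4.36.2") in exact)    # True
print(Version("4.36.3") in exact)    # False: exact pins reject even patch bumps
print(Version("3.11.5") in caret)    # True: below the <4.0.0 ceiling
print(Version("1.0.0") in anything)  # True: "*" accepts every release
```

Loosening a pin to `"*"` hands version selection entirely to the resolver, which avoids conflicts with other packages at the cost of less reproducible installs.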

requirements.txt
@@ -1,5 +1,5 @@
 torch==2.1.1
-transformers>2.10==4.35.0
+transformers>2.10==4.36.2
 pandas==1.5.3
 langchain==0.0.333
 nest_asyncio==1.5.6
@@ -72,6 +72,5 @@ pre-commit==3.2.2
 sqlalchemy
 pgvector
 qdrant-client
-sentence-transformers
 peft
 modelscope==1.10.0
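Note that the updated line keeps the pre-existing `transformers>2.10==4.36.2` shape, which is not a valid PEP 508 requirement: multiple specifiers must be comma-separated, so the string does not parse. A quick check with the `packaging` library (the loop and messages here are illustrative; if this reading is right, the plain `transformers==4.36.2` is likely the intended form):

```python
# Check whether a requirements.txt line is a valid PEP 508 requirement.
# Assumes the `packaging` library is installed (pip itself vendors it).
from packaging.requirements import Requirement, InvalidRequirement

for line in [
    "transformers>2.10==4.36.2",   # the shape used in this diff
    "transformers>2.10,==4.36.2",  # comma-separated specifiers: valid but redundant
    "transformers==4.36.2",        # the plain exact pin
]:
    try:
        req = Requirement(line)
        print(f"OK      {line!r} -> {req.specifier}")
    except InvalidRequirement as exc:
        print(f"INVALID {line!r}: {exc}")
```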

swarms/models/vllm.py
@@ -1,13 +1,11 @@
 import torch
 from swarms.models.base_llm import AbstractLLM
 import subprocess
 
 if torch.cuda.is_available() or torch.cuda.device_count() > 0:
-    # Download vllm with pip
     try:
-        subprocess.run(["pip", "install", "vllm"])
         from vllm import LLM, SamplingParams
-    except Exception as error:
+    except ImportError as error:
         print(f"[ERROR] [vLLM] {error}")
         raise error
 else:
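The vllm.py change removes the `subprocess.run(["pip", "install", "vllm"])` call that installed vllm at import time and narrows the handler from `except Exception` to `except ImportError`, making vllm an ordinary optional dependency. A minimal sketch of the resulting guarded-import pattern, assuming only that `LLM` and `SamplingParams` come from the real vllm package; the `load_vllm` helper, its messages, and the simplified single CUDA check are hypothetical:

```python
# Sketch of the guarded optional-import pattern used above.
# `LLM` and `SamplingParams` come from the real vllm package; the helper
# itself and its error messages are illustrative, not the repo's API.
import torch


def load_vllm():
    """Return (LLM, SamplingParams) if vllm is usable, else raise with a hint."""
    if not torch.cuda.is_available():
        # vllm targets CUDA GPUs; fail early with a clear message on CPU-only hosts.
        raise RuntimeError("vLLM requires a CUDA-capable GPU.")
    try:
        from vllm import LLM, SamplingParams
    except ImportError as error:
        # Surface the real import failure instead of pip-installing at runtime.
        raise ImportError(
            "vllm is not installed; run `pip install vllm` manually."
        ) from error
    return LLM, SamplingParams
```

Compared with installing packages from inside an import path, this keeps environment changes explicit and lets the caller see the actual `ImportError` rather than having a broad `except Exception` swallow unrelated failures.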
