Dockerfile for unit tests

pull/58/head
Kye 1 year ago
parent a11a41c941
commit 809ae57ead

@@ -0,0 +1,48 @@
# This is a Dockerfile for running unit tests
ARG POETRY_HOME=/opt/poetry
# Use the Python base image
FROM python:3.11.2-bullseye AS builder
# Define the version of Poetry to install (default is 1.4.2)
ARG POETRY_VERSION=1.4.2
# Define the directory to install Poetry to (default is /opt/poetry)
ARG POETRY_HOME
# Create a Python virtual environment for Poetry and install it
RUN python3 -m venv ${POETRY_HOME} && \
$POETRY_HOME/bin/pip install --upgrade pip && \
$POETRY_HOME/bin/pip install poetry==${POETRY_VERSION}
# Test if Poetry is installed in the expected path
RUN echo "Poetry version:" && $POETRY_HOME/bin/poetry --version
# Set the working directory for the app
WORKDIR /app
# Use a multi-stage build to install dependencies
FROM builder AS dependencies
ARG POETRY_HOME
# Copy only the dependency files for installation
COPY pyproject.toml poetry.lock poetry.toml ./
# Install the Poetry dependencies (this layer will be cached as long as the dependencies don't change)
RUN $POETRY_HOME/bin/poetry install --no-interaction --no-ansi --with test
# Use a multi-stage build to run tests
FROM dependencies AS tests
# Copy the rest of the app source code (this layer will be invalidated and rebuilt whenever the source code changes)
COPY . .
RUN /opt/poetry/bin/poetry install --no-interaction --no-ansi --with test
# Set the entrypoint to run tests using Poetry
ENTRYPOINT ["/opt/poetry/bin/poetry", "run", "pytest"]
# Set the default command to run all unit tests
CMD ["tests/"]
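To run the suite locally, one would typically build this image with "docker build -f Dockerfile -t swarms-tests ." and run it with "docker run --rm swarms-tests"; the tag is illustrative, not part of the commit. Because the test path is supplied through CMD, a narrower run can override it, e.g. "docker run --rm swarms-tests tests/<some test file>".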

@@ -0,0 +1,4 @@
from swarms.models import OpenAIChat
llm = OpenAIChat(openai_api_key="YOUR_OPENAI_API_KEY")  # placeholder: load the real key from an environment variable, never commit it
out = llm("Hello, I am a robot and I like to talk about robots.")

@@ -54,3 +54,12 @@ termcolor = "*"
[tool.poetry.dev-dependencies]
first_dependency = {git = "https://github.com/IDEA-Research/GroundingDINO.git"}
second_dependency = {git = "https://github.com/facebookresearch/segment-anything.git"}
[tool.poetry.group.lint.dependencies]
ruff = "^0.0.249"
types-toml = "^0.10.8.1"
types-redis = "^4.3.21.6"
types-pytz = "^2023.3.0.0"
black = "^23.1.0"
types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"

@@ -2,7 +2,7 @@ import logging
import os
import time
import openai
import llm
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@@ -19,7 +19,7 @@ class OpenAI:
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
llm.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
@@ -27,7 +27,7 @@ class OpenAI:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
llm.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
@@ -59,14 +59,14 @@ class OpenAI:
"content": prompt
}
]
response = openai.ChatCompletion.create(
response = llm.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
response = llm.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
@@ -77,7 +77,7 @@ class OpenAI:
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
except llm.error.RateLimitError as e:
sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
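The rate-limit handling above boils down to sleep-and-retry controlled by the OPENAI_RATE_TIMEOUT environment variable. A minimal standalone sketch of that pattern, assuming the pre-1.0 openai SDK (where RateLimitError lives under openai.error) and a hypothetical helper name:

import os
import time

import openai


def call_with_rate_limit_retry(make_request, max_attempts=3):
    # Sleep duration comes from OPENAI_RATE_TIMEOUT, defaulting to 30 seconds.
    sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
    for attempt in range(max_attempts):
        try:
            return make_request()
        except openai.error.RateLimitError as error:
            if attempt == max_attempts - 1:
                raise
            print(f"{error}, sleeping {sleep_duration}s before retrying")
            time.sleep(sleep_duration)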

@@ -42,7 +42,7 @@ logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
import openai
import llm
min_seconds = 4
max_seconds = 10
@@ -53,18 +53,18 @@ def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(llm.error.Timeout)
| retry_if_exception_type(llm.error.APIError)
| retry_if_exception_type(llm.error.APIConnectionError)
| retry_if_exception_type(llm.error.RateLimitError)
| retry_if_exception_type(llm.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
import openai
import llm
min_seconds = 4
max_seconds = 10
@@ -75,11 +75,11 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(llm.error.Timeout)
| retry_if_exception_type(llm.error.APIError)
| retry_if_exception_type(llm.error.APIConnectionError)
| retry_if_exception_type(llm.error.RateLimitError)
| retry_if_exception_type(llm.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
@@ -98,9 +98,9 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]):
import openai
import llm
raise openai.error.APIError("OpenAI API returned an empty embedding")
raise llm.error.APIError("OpenAI API returned an empty embedding")
return response
@@ -278,9 +278,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
default="",
)
try:
import openai
import llm
values["client"] = openai.Embedding
values["client"] = llm.Embedding
except ImportError:
raise ImportError(
"Could not import openai python package. "
@@ -304,9 +304,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
if self.openai_api_type in ("azure", "azure_ad", "azuread"):
openai_args["engine"] = self.deployment
if self.openai_proxy:
import openai
import llm
openai.proxy = {
llm.proxy = {
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment] # noqa: E501
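The decorators above use tenacity to retry transient OpenAI errors with exponential backoff. As a reference for how such a decorator is applied, here is a minimal sketch under the same assumptions (pre-1.0 openai SDK; the function name and model are illustrative, not code from this commit):

import openai
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential


@retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type(openai.error.RateLimitError)
    | retry_if_exception_type(openai.error.Timeout),
)
def embed_batch(texts):
    # Each attempt re-issues the same request until it succeeds or the retries are exhausted.
    return openai.Embedding.create(input=texts, model="text-embedding-ada-002")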

@@ -1,10 +1,7 @@
# from swarms.models.palm import GooglePalm
# from swarms.models.openai import OpenAIChat
#prompts
from swarms.models.anthropic import Anthropic
# from swarms.models.palm import GooglePalm
from swarms.models.petals import Petals
# from swarms.models.openai import OpenAIChat
#prompts
from swarms.models.chat_openai import OpenAIChat
from swarms.models.prompts.debate import *
from swarms.models.mistral import Mistral

@@ -86,14 +86,14 @@ def _create_retry_decorator(
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import openai
import llm
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
llm.error.Timeout,
llm.error.APIError,
llm.error.APIConnectionError,
llm.error.RateLimitError,
llm.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
@@ -247,9 +247,9 @@ class BaseOpenAI(BaseLLM):
default="",
)
try:
import openai
import llm
values["client"] = openai.Completion
values["client"] = llm.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
@@ -494,9 +494,9 @@ class BaseOpenAI(BaseLLM):
"organization": self.openai_organization,
}
if self.openai_proxy:
import openai
import llm
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
llm.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
@@ -765,22 +765,22 @@ class OpenAIChat(BaseLLM):
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
import openai
import llm
openai.api_key = openai_api_key
llm.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
llm.api_base = openai_api_base
if openai_organization:
openai.organization = openai_organization
llm.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
llm.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
values["client"] = llm.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "

@@ -1,17 +1,20 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms.agents.message import Message
class Mistral:
"""
Mistral
model = MistralWrapper(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
model = Mistral(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
task = "My favourite condiment is"
result = model.run(task)
print(result)
"""
def __init__(
self,
ai_name: str = "Node Model Agent",
system_prompt: str = None,
model_name: str ="mistralai/Mistral-7B-v0.1",
device: str ="cuda",
use_flash_attention: bool = False,
@@ -19,6 +22,8 @@ class Mistral:
max_length: int = 100,
do_sample: bool = True
):
self.ai_name = ai_name
self.system_prompt = system_prompt
self.model_name = model_name
self.device = device
self.use_flash_attention = use_flash_attention
@@ -34,6 +39,8 @@ class Mistral:
self.tokenizer = None
self.load_model()
self.history = []
def load_model(self):
try:
self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
@@ -64,3 +71,83 @@ class Mistral:
return output_text
except Exception as e:
raise ValueError(f"Error running the model: {str(e)}")
def chat(
self,
msg: str = None,
streaming: bool = False
):
"""
Run chat
Args:
msg (str, optional): Message to send to the agent. Defaults to None.
streaming (bool, optional): Whether to stream the response. Defaults to False.
Returns:
str: Response from the agent
Usage:
--------------
agent = Mistral()
agent.chat("Hello")
"""
#add users message to the history
self.history.append(
Message(
"User",
msg
)
)
#process msg
try:
response = self.run(msg)
#add agent's response to the history
self.history.append(
Message(
"Agent",
response
)
)
# if streaming is enabled, yield the response token by token; otherwise return it whole
if streaming:
return self._stream_response(response)
else:
return response
except Exception as error:
error_message = f"Error processing message: {str(error)}"
#add error to history
self.history.append(
Message(
"Agent",
error_message
)
)
return error_message
def _stream_response(
self,
response: str = None
):
"""
Yield the response token by token (word by word)
Usage:
--------------
for token in _stream_response(response):
print(token)
"""
for token in response.split():
yield token
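A short usage sketch for the class above, matching its docstring; it assumes the mistralai/Mistral-7B-v0.1 weights can be downloaded and that a CUDA device is available (pass device="cpu" otherwise):

model = Mistral(device="cuda", temperature=0.7, max_length=200)

# Single-shot generation, as in the class docstring.
print(model.run("My favourite condiment is"))

# Streaming chat yields the reply word by word.
for token in model.chat("Tell me about robots.", streaming=True):
    print(token, end=" ")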

@@ -0,0 +1,4 @@
from typing import List, Dict, Any, Union
from concurrent.futures import Executor, ThreadPoolExecutor, as_completed
from graphlib import TopologicalSorter
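The new module only contains these imports so far; they suggest a dependency-aware concurrent task runner. A minimal sketch of how graphlib.TopologicalSorter and ThreadPoolExecutor can be combined for that purpose (the function and the example tasks are assumptions, not code from this commit):

from concurrent.futures import ThreadPoolExecutor
from graphlib import TopologicalSorter
from typing import Any, Callable, Dict, List


def run_graph(tasks: Dict[str, Callable[[], Any]], deps: Dict[str, List[str]]) -> Dict[str, Any]:
    # deps maps each task name to the names of the tasks it depends on.
    sorter = TopologicalSorter(deps)
    sorter.prepare()
    results: Dict[str, Any] = {}
    with ThreadPoolExecutor() as pool:
        while sorter.is_active():
            ready = sorter.get_ready()
            futures = {name: pool.submit(tasks[name]) for name in ready}
            for name, future in futures.items():
                results[name] = future.result()  # wait for the task, then unblock its dependents
                sorter.done(name)
    return results


# Example: "summarize" runs only after "fetch"; both callables are placeholders.
print(run_graph(
    {"fetch": lambda: "raw text", "summarize": lambda: "summary"},
    {"fetch": [], "summarize": ["fetch"]},
))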