Dockerfile for unit tests

Former-commit-id: 809ae57ead
group-chat
Kye 1 year ago
parent 99997aad68
commit bf1510298b

@@ -0,0 +1,48 @@
# This is a Dockerfile for running unit tests
ARG POETRY_HOME=/opt/poetry
# Use the Python base image
FROM python:3.11.2-bullseye AS builder
# Define the version of Poetry to install (default is 1.4.2)
ARG POETRY_VERSION=1.4.2
# Define the directory to install Poetry to (default is /opt/poetry)
ARG POETRY_HOME
# Create a Python virtual environment for Poetry and install it
RUN python3 -m venv ${POETRY_HOME} && \
$POETRY_HOME/bin/pip install --upgrade pip && \
$POETRY_HOME/bin/pip install poetry==${POETRY_VERSION}
# Test if Poetry is installed in the expected path
RUN echo "Poetry version:" && $POETRY_HOME/bin/poetry --version
# Set the working directory for the app
WORKDIR /app
# Use a multi-stage build to install dependencies
FROM builder AS dependencies
ARG POETRY_HOME
# Copy only the dependency files for installation
COPY pyproject.toml poetry.lock poetry.toml ./
# Install the Poetry dependencies (this layer will be cached as long as the dependencies don't change)
RUN $POETRY_HOME/bin/poetry install --no-interaction --no-ansi --with test
# Use a multi-stage build to run tests
FROM dependencies AS tests
# Copy the rest of the app source code (this layer will be invalidated and rebuilt whenever the source code changes)
COPY . .
RUN /opt/poetry/bin/poetry install --no-interaction --no-ansi --with test
# Set the entrypoint to run tests using Poetry
ENTRYPOINT ["/opt/poetry/bin/poetry", "run", "pytest"]
# Set the default command to run all unit tests
CMD ["tests/"]
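A quick way to exercise this image locally is to script the build and test run. The sketch below is an assumption about usage (the swarms-tests tag is not part of the commit), not something the Dockerfile itself defines.

import subprocess

# Build the test image from the repository root; "swarms-tests" is an assumed tag.
subprocess.run(["docker", "build", "-t", "swarms-tests", "."], check=True)

# Run the suite; the ENTRYPOINT/CMD above resolve to `poetry run pytest tests/`.
subprocess.run(["docker", "run", "--rm", "swarms-tests"], check=True)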

@@ -0,0 +1,4 @@
from swarms.models import OpenAIChat
llm = OpenAIChat(openai_api_key="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")
out = llm("Hello, I am a robot and I like to talk about robots.")
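The key above is hard-coded into the example, which leaks a credential into version control. A safer variant of the same three lines, assuming OpenAIChat accepts openai_api_key exactly as shown, reads the key from the environment instead:

import os
from swarms.models import OpenAIChat

# Expect the key in OPENAI_API_KEY rather than in source control.
llm = OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"])
out = llm("Hello, I am a robot and I like to talk about robots.")
print(out)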

@@ -53,4 +53,13 @@ termcolor = "*"
[tool.poetry.dev-dependencies]
first_dependency = {git = "https://github.com/IDEA-Research/GroundingDINO.git"}
second_dependency = {git = "https://github.com/facebookresearch/segment-anything.git"}
[tool.poetry.group.lint.dependencies]
ruff = "^0.0.249"
types-toml = "^0.10.8.1"
types-redis = "^4.3.21.6"
types-pytz = "^2023.3.0.0"
black = "^23.1.0"
types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"

@@ -2,7 +2,7 @@ import logging
import os
import time
-import openai
+import llm
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@@ -19,7 +19,7 @@ class OpenAI:
        if api_key == "" or api_key is None:
            api_key = os.environ.get("OPENAI_API_KEY", "")
        if api_key != "":
-            openai.api_key = api_key
+            llm.api_key = api_key
        else:
            raise Exception("Please provide OpenAI API key")
@@ -27,7 +27,7 @@ class OpenAI:
        api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
        if api_base != "":
            # e.g. https://api.openai.com/v1/ or your custom url
-            openai.api_base = api_base
+            llm.api_base = api_base
            print(f'Using custom api_base {api_base}')
        if api_model == "" or api_model is None:
@@ -59,14 +59,14 @@ class OpenAI:
                        "content": prompt
                    }
                ]
-                response = openai.ChatCompletion.create(
+                response = llm.ChatCompletion.create(
                    model=self.api_model,
                    messages=messages,
                    max_tokens=max_tokens,
                    temperature=temperature,
                )
            else:
-                response = openai.Completion.create(
+                response = llm.Completion.create(
                    engine=self.api_model,
                    prompt=prompt,
                    n=k,
@@ -77,7 +77,7 @@ class OpenAI:
            with open("openai.logs", 'a') as log_file:
                log_file.write("\n" + "-----------" + '\n' + "Prompt : " + prompt + "\n")
            return response
-        except openai.error.RateLimitError as e:
+        except llm.error.RateLimitError as e:
            sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
            print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
            time.sleep(sleep_duratoin)
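The handler above sleeps once after a rate-limit error; a minimal retry-with-backoff wrapper around any completion call (call_with_backoff, call_model, and the attempt count are illustrative assumptions, not part of this commit) would look like:

import os
import time

def call_with_backoff(call_model, max_attempts: int = 3):
    """Call `call_model()`, sleeping and retrying when it raises."""
    delay = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
    for attempt in range(max_attempts):
        try:
            return call_model()
        except Exception as error:  # narrow this to the client's RateLimitError in real code
            if attempt == max_attempts - 1:
                raise
            print(f"{error}, retrying in {delay}s")
            time.sleep(delay)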

@@ -42,7 +42,7 @@ logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
-    import openai
+    import llm
    min_seconds = 4
    max_seconds = 10
@@ -53,18 +53,18 @@ def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
-            retry_if_exception_type(openai.error.Timeout)
-            | retry_if_exception_type(openai.error.APIError)
-            | retry_if_exception_type(openai.error.APIConnectionError)
-            | retry_if_exception_type(openai.error.RateLimitError)
-            | retry_if_exception_type(openai.error.ServiceUnavailableError)
+            retry_if_exception_type(llm.error.Timeout)
+            | retry_if_exception_type(llm.error.APIError)
+            | retry_if_exception_type(llm.error.APIConnectionError)
+            | retry_if_exception_type(llm.error.RateLimitError)
+            | retry_if_exception_type(llm.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
-    import openai
+    import llm
    min_seconds = 4
    max_seconds = 10
@@ -75,11 +75,11 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
-            retry_if_exception_type(openai.error.Timeout)
-            | retry_if_exception_type(openai.error.APIError)
-            | retry_if_exception_type(openai.error.APIConnectionError)
-            | retry_if_exception_type(openai.error.RateLimitError)
-            | retry_if_exception_type(openai.error.ServiceUnavailableError)
+            retry_if_exception_type(llm.error.Timeout)
+            | retry_if_exception_type(llm.error.APIError)
+            | retry_if_exception_type(llm.error.APIConnectionError)
+            | retry_if_exception_type(llm.error.RateLimitError)
+            | retry_if_exception_type(llm.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
@@ -98,9 +98,9 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
    if any(len(d["embedding"]) == 1 for d in response["data"]):
-        import openai
-        raise openai.error.APIError("OpenAI API returned an empty embedding")
+        import llm
+        raise llm.error.APIError("OpenAI API returned an empty embedding")
    return response
@@ -278,9 +278,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
            default="",
        )
        try:
-            import openai
-            values["client"] = openai.Embedding
+            import llm
+            values["client"] = llm.Embedding
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
@@ -304,9 +304,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
        if self.openai_api_type in ("azure", "azure_ad", "azuread"):
            openai_args["engine"] = self.deployment
        if self.openai_proxy:
-            import openai
-            openai.proxy = {
+            import llm
+            llm.proxy = {
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]  # noqa: E501
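The decorators above follow the standard tenacity pattern: exponential backoff between 4 and 10 seconds, a bounded number of attempts, and a log line before each sleep. Stripped of the embeddings-specific wiring, a minimal sketch of the same idea (flaky_call and the TimeoutError filter are illustrative assumptions) is:

import logging
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)

@retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type(TimeoutError),  # swap in the client's error types here
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
def flaky_call():
    ...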

@@ -1,10 +1,7 @@
-# from swarms.models.palm import GooglePalm
-# from swarms.models.openai import OpenAIChat
#prompts
from swarms.models.anthropic import Anthropic
# from swarms.models.palm import GooglePalm
from swarms.models.petals import Petals
-# from swarms.models.openai import OpenAIChat
-#prompts
+from swarms.models.chat_openai import OpenAIChat
from swarms.models.prompts.debate import *
from swarms.models.mistral import Mistral
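With this change the concrete model classes resolve directly from the package namespace; a quick import check (nothing is instantiated, so no API keys or model weights are needed) might be:

# Assumes the swarms package from this commit is installed.
from swarms.models import Anthropic, Mistral, OpenAIChat, Petals

# Verify the re-exports introduced above without constructing any model.
print(Anthropic.__name__, Mistral.__name__, OpenAIChat.__name__, Petals.__name__)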

@@ -86,14 +86,14 @@ def _create_retry_decorator(
        Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
    ] = None,
) -> Callable[[Any], Any]:
-    import openai
+    import llm
    errors = [
-        openai.error.Timeout,
-        openai.error.APIError,
-        openai.error.APIConnectionError,
-        openai.error.RateLimitError,
-        openai.error.ServiceUnavailableError,
+        llm.error.Timeout,
+        llm.error.APIError,
+        llm.error.APIConnectionError,
+        llm.error.RateLimitError,
+        llm.error.ServiceUnavailableError,
    ]
    return create_base_retry_decorator(
        error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
@@ -247,9 +247,9 @@ class BaseOpenAI(BaseLLM):
            default="",
        )
        try:
-            import openai
-            values["client"] = openai.Completion
+            import llm
+            values["client"] = llm.Completion
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
@@ -494,9 +494,9 @@ class BaseOpenAI(BaseLLM):
            "organization": self.openai_organization,
        }
        if self.openai_proxy:
-            import openai
-            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
+            import llm
+            llm.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
        return {**openai_creds, **self._default_params}
    @property
@@ -765,22 +765,22 @@ class OpenAIChat(BaseLLM):
            values, "openai_organization", "OPENAI_ORGANIZATION", default=""
        )
        try:
-            import openai
-            openai.api_key = openai_api_key
+            import llm
+            llm.api_key = openai_api_key
            if openai_api_base:
-                openai.api_base = openai_api_base
+                llm.api_base = openai_api_base
            if openai_organization:
-                openai.organization = openai_organization
+                llm.organization = openai_organization
            if openai_proxy:
-                openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
+                llm.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
-            values["client"] = openai.ChatCompletion
+            values["client"] = llm.ChatCompletion
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "

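The validation above falls back to environment variables when keyword arguments are omitted; a minimal way to configure a run that way (the values are placeholders, and the environment-variable fallback is assumed from the lookups shown above) would be:

import os

# Placeholders; set real values before constructing the model.
os.environ["OPENAI_API_KEY"] = "sk-..."
os.environ["OPENAI_API_BASE"] = "https://api.openai.com/v1"
os.environ["OPENAI_ORGANIZATION"] = "org-..."

from swarms.models import OpenAIChat

chat = OpenAIChat()
print(chat("Summarise this commit in one sentence."))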
@@ -1,17 +1,20 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms.agents.message import Message
class Mistral:
    """
    Mistral
-    model = MistralWrapper(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
+    model = Mistral(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
    task = "My favourite condiment is"
    result = model.run(task)
    print(result)
    """
    def __init__(
        self,
        ai_name: str = "Node Model Agent",
        system_prompt: str = None,
        model_name: str ="mistralai/Mistral-7B-v0.1",
        device: str ="cuda",
        use_flash_attention: bool = False,
@@ -19,6 +22,8 @@ class Mistral:
        max_length: int = 100,
        do_sample: bool = True
    ):
        self.ai_name = ai_name
        self.system_prompt = system_prompt
        self.model_name = model_name
        self.device = device
        self.use_flash_attention = use_flash_attention
@@ -34,6 +39,8 @@ class Mistral:
        self.tokenizer = None
        self.load_model()
        self.history = []

    def load_model(self):
        try:
            self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
@@ -63,4 +70,84 @@ class Mistral:
            output_text = self.tokenizer.batch_decode(generated_ids)[0]
            return output_text
        except Exception as e:
            raise ValueError(f"Error running the model: {str(e)}")
    def chat(
        self,
        msg: str = None,
        streaming: bool = False
    ):
        """
        Run a chat turn against the model

        Args:
            msg (str, optional): Message to send to the agent. Defaults to None.
            streaming (bool, optional): Whether to stream the response. Defaults to False.

        Returns:
            str: Response from the agent

        Usage:
        --------------
        model = Mistral()
        model.chat("Hello")
        """

        # add the user's message to the history
        self.history.append(
            Message(
                "User",
                msg
            )
        )

        # process the message
        try:
            response = self.run(msg)

            # add the agent's response to the history
            self.history.append(
                Message(
                    "Agent",
                    response
                )
            )

            # if streaming is True, yield the response token by token
            if streaming:
                return self._stream_response(response)
            else:
                return response

        except Exception as error:
            error_message = f"Error processing message: {str(error)}"

            # add the error to the history
            self.history.append(
                Message(
                    "Agent",
                    error_message
                )
            )

            return error_message

    def _stream_response(
        self,
        response: str = None
    ):
        """
        Yield the response token by token (word by word)

        Usage:
        --------------
        for token in _stream_response(response):
            print(token)
        """
        for token in response.split():
            yield token
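A short usage sketch of the new chat interface (local Mistral weights and a CUDA device are assumed; when streaming=True the finished reply is simply yielded word by word):

from swarms.models.mistral import Mistral

model = Mistral(device="cuda", temperature=0.7, max_length=200)

# Single turn; both the prompt and the reply are appended to model.history.
print(model.chat("My favourite condiment is"))

# Streamed variant: tokens (words) are yielded one at a time.
for token in model.chat("Tell me about robots.", streaming=True):
    print(token, end=" ")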

@@ -0,0 +1,4 @@
from typing import List, Dict, Any, Union
from concurrent.futures import Executor, ThreadPoolExecutor, as_completed
from graphlib import TopologicalSorter
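Only the imports of this new module are visible here, so its body is not shown; TopologicalSorter combined with ThreadPoolExecutor is, however, the standard pattern for running a dependency graph of tasks concurrently. A minimal sketch of that pattern (the graph and the run_dag helper are illustrative, not taken from the commit):

from concurrent.futures import ThreadPoolExecutor, as_completed
from graphlib import TopologicalSorter
from typing import Callable, Dict, Hashable, Set

def run_dag(graph: Dict[Hashable, Set[Hashable]], work: Callable[[Hashable], None]) -> None:
    """Run `work(node)` for every node, never starting a node before its dependencies finish."""
    sorter = TopologicalSorter(graph)
    sorter.prepare()
    with ThreadPoolExecutor() as pool:
        while sorter.is_active():
            ready = sorter.get_ready()  # nodes whose predecessors are all done
            futures = {pool.submit(work, node): node for node in ready}
            for future in as_completed(futures):
                future.result()  # re-raise any worker exception
                sorter.done(futures[future])

# Illustrative graph: b and c depend on a, d depends on both.
run_dag({"a": set(), "b": {"a"}, "c": {"a"}, "d": {"b", "c"}}, print)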