From bf1510298b5baf1c7bd998548485fce495b7e4f5 Mon Sep 17 00:00:00 2001
From: Kye
Date: Fri, 6 Oct 2023 10:23:35 -0400
Subject: [PATCH] Dockerfile for unit tests

Former-commit-id: 809ae57ead75c289b94f13560296c4d833a9dd38
---
 Dockerfile                                  | 48 +++++++++++
 llm.py                                      |  4 +
 pyproject.toml                              | 11 ++-
 swarms/agents/aot.py                        | 12 +--
 swarms/embeddings/openai.py                 | 36 ++++----
 swarms/models/__init__.py                   |  5 +-
 swarms/models/{openai.py => chat_openai.py} | 32 +++----
 swarms/models/huggingface.py                |  0
 swarms/models/mistral.py                    | 93 ++++++++++++++++++++-
 swarms/structs/nonlinear_workflow.py        |  4 +
 10 files changed, 197 insertions(+), 48 deletions(-)
 create mode 100644 Dockerfile
 create mode 100644 llm.py
 rename swarms/models/{openai.py => chat_openai.py} (97%)
 delete mode 100644 swarms/models/huggingface.py
 create mode 100644 swarms/structs/nonlinear_workflow.py

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..38e135ab
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,48 @@
+# This is a Dockerfile for running unit tests
+
+ARG POETRY_HOME=/opt/poetry
+
+# Use the Python base image
+FROM python:3.11.2-bullseye AS builder
+
+# Define the version of Poetry to install (default is 1.4.2)
+ARG POETRY_VERSION=1.4.2
+
+# Define the directory to install Poetry to (default is /opt/poetry)
+ARG POETRY_HOME
+
+# Create a Python virtual environment for Poetry and install it
+RUN python3 -m venv ${POETRY_HOME} && \
+    $POETRY_HOME/bin/pip install --upgrade pip && \
+    $POETRY_HOME/bin/pip install poetry==${POETRY_VERSION}
+
+# Test if Poetry is installed in the expected path
+RUN echo "Poetry version:" && $POETRY_HOME/bin/poetry --version
+
+# Set the working directory for the app
+WORKDIR /app
+
+# Use a multi-stage build to install dependencies
+FROM builder AS dependencies
+
+ARG POETRY_HOME
+
+# Copy only the dependency files for installation
+COPY pyproject.toml poetry.lock poetry.toml ./
+
+# Install the Poetry dependencies (this layer will be cached as long as the dependencies don't change)
+RUN $POETRY_HOME/bin/poetry install --no-interaction --no-ansi --with test
+
+# Use a multi-stage build to run tests
+FROM dependencies AS tests
+
+# Copy the rest of the app source code (this layer will be invalidated and rebuilt whenever the source code changes)
+COPY . .
+
+RUN /opt/poetry/bin/poetry install --no-interaction --no-ansi --with test
+
+# Set the entrypoint to run tests using Poetry
+ENTRYPOINT ["/opt/poetry/bin/poetry", "run", "pytest"]
+
+# Set the default command to run all unit tests
+CMD ["tests/"]
\ No newline at end of file
diff --git a/llm.py b/llm.py
new file mode 100644
index 00000000..93907c0b
--- /dev/null
+++ b/llm.py
@@ -0,0 +1,4 @@
+from swarms.models import OpenAIChat
+
+llm = OpenAIChat(openai_api_key="your-openai-api-key")
+out = llm("Hello, I am a robot and I like to talk about robots.")
\ No newline at end of file
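For a local smoke-test script like `llm.py`, the key is better pulled from the environment than hardcoded. A minimal sketch, assuming `OpenAIChat` accepts `openai_api_key` as shown above and that `OPENAI_API_KEY` is exported in the shell:

```python
import os

from swarms.models import OpenAIChat

# Read the key from the environment so it never lands in version control.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("Set OPENAI_API_KEY before running this script.")

llm = OpenAIChat(openai_api_key=api_key)
out = llm("Hello, I am a robot and I like to talk about robots.")
print(out)
```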
diff --git a/pyproject.toml b/pyproject.toml
index 51d096c5..f615fa66 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,4 +53,13 @@ termcolor = "*"
 
 [tool.poetry.dev-dependencies]
 first_dependency = {git = "https://github.com/IDEA-Research/GroundingDINO.git"}
-second_dependency = {git = "https://github.com/facebookresearch/segment-anything.git"}
\ No newline at end of file
+second_dependency = {git = "https://github.com/facebookresearch/segment-anything.git"}
+
+[tool.poetry.group.lint.dependencies]
+ruff = "^0.0.249"
+types-toml = "^0.10.8.1"
+types-redis = "^4.3.21.6"
+types-pytz = "^2023.3.0.0"
+black = "^23.1.0"
+types-chardet = "^5.0.4.6"
+mypy-protobuf = "^3.0.0"
\ No newline at end of file
diff --git a/swarms/agents/aot.py b/swarms/agents/aot.py
index daf31481..de02155e 100644
--- a/swarms/agents/aot.py
+++ b/swarms/agents/aot.py
@@ -2,7 +2,7 @@ import logging
 import os
 import time
 
-import openai
+import llm
 
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
@@ -19,7 +19,7 @@ class OpenAI:
         if api_key == "" or api_key is None:
             api_key = os.environ.get("OPENAI_API_KEY", "")
         if api_key != "":
-            openai.api_key = api_key
+            llm.api_key = api_key
         else:
             raise Exception("Please provide OpenAI API key")
 
@@ -27,7 +27,7 @@ class OpenAI:
             api_base = os.environ.get("OPENAI_API_BASE", "")  # if not set, use the default base path of "https://api.openai.com/v1"
         if api_base != "":
             # e.g. https://api.openai.com/v1/ or your custom url
-            openai.api_base = api_base
+            llm.api_base = api_base
             print(f'Using custom api_base {api_base}')
 
         if api_model == "" or api_model is None:
@@ -59,14 +59,14 @@ class OpenAI:
                     "content": prompt
                 }
             ]
-            response = openai.ChatCompletion.create(
+            response = llm.ChatCompletion.create(
                 model=self.api_model,
                 messages=messages,
                 max_tokens=max_tokens,
                 temperature=temperature,
             )
         else:
-            response = openai.Completion.create(
+            response = llm.Completion.create(
                 engine=self.api_model,
                 prompt=prompt,
                 n=k,
@@ -77,7 +77,7 @@ class OpenAI:
             with open("openai.logs", 'a') as log_file:
                 log_file.write("\n" + "-----------" + '\n' + "Prompt : " + prompt + "\n")
             return response
-        except openai.error.RateLimitError as e:
+        except llm.error.RateLimitError as e:
             sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
             print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
             time.sleep(sleep_duratoin)
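The `aot.py` wrapper above backs off on rate limits by sleeping for `OPENAI_RATE_TIMEOUT` seconds before retrying. A standalone sketch of that manual backoff pattern, assuming the pre-1.0 `openai` client that exposes `openai.ChatCompletion` and `openai.error.RateLimitError`; the function name, `max_attempts`, and the model string are illustrative:

```python
import os
import time

import openai  # pre-1.0 openai package, matching the API used in aot.py


def chat_with_backoff(messages, model="gpt-3.5-turbo", max_attempts=3):
    """Call the chat endpoint, sleeping and retrying on rate-limit errors."""
    for attempt in range(max_attempts):
        try:
            return openai.ChatCompletion.create(model=model, messages=messages)
        except openai.error.RateLimitError as e:
            # Same knob aot.py reads: OPENAI_RATE_TIMEOUT, defaulting to 30 seconds.
            sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
            print(f"{e}, sleeping {sleep_duration}s before retry {attempt + 1}")
            time.sleep(sleep_duration)
    raise RuntimeError("Exhausted retries after repeated rate-limit errors.")
```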
diff --git a/swarms/embeddings/openai.py b/swarms/embeddings/openai.py
index f1e67315..bdb25868 100644
--- a/swarms/embeddings/openai.py
+++ b/swarms/embeddings/openai.py
@@ -42,7 +42,7 @@ logger = logging.getLogger(__name__)
 
 
 def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
-    import openai
+    import llm
 
     min_seconds = 4
     max_seconds = 10
@@ -53,18 +53,18 @@ def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any
         stop=stop_after_attempt(embeddings.max_retries),
         wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
         retry=(
-            retry_if_exception_type(openai.error.Timeout)
-            | retry_if_exception_type(openai.error.APIError)
-            | retry_if_exception_type(openai.error.APIConnectionError)
-            | retry_if_exception_type(openai.error.RateLimitError)
-            | retry_if_exception_type(openai.error.ServiceUnavailableError)
+            retry_if_exception_type(llm.error.Timeout)
+            | retry_if_exception_type(llm.error.APIError)
+            | retry_if_exception_type(llm.error.APIConnectionError)
+            | retry_if_exception_type(llm.error.RateLimitError)
+            | retry_if_exception_type(llm.error.ServiceUnavailableError)
         ),
         before_sleep=before_sleep_log(logger, logging.WARNING),
     )
 
 
 def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
-    import openai
+    import llm
 
     min_seconds = 4
     max_seconds = 10
@@ -75,11 +75,11 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
         stop=stop_after_attempt(embeddings.max_retries),
         wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
         retry=(
-            retry_if_exception_type(openai.error.Timeout)
-            | retry_if_exception_type(openai.error.APIError)
-            | retry_if_exception_type(openai.error.APIConnectionError)
-            | retry_if_exception_type(openai.error.RateLimitError)
-            | retry_if_exception_type(openai.error.ServiceUnavailableError)
+            retry_if_exception_type(llm.error.Timeout)
+            | retry_if_exception_type(llm.error.APIError)
+            | retry_if_exception_type(llm.error.APIConnectionError)
+            | retry_if_exception_type(llm.error.RateLimitError)
+            | retry_if_exception_type(llm.error.ServiceUnavailableError)
         ),
         before_sleep=before_sleep_log(logger, logging.WARNING),
     )
@@ -98,9 +98,9 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
 # https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
 def _check_response(response: dict) -> dict:
     if any(len(d["embedding"]) == 1 for d in response["data"]):
-        import openai
+        import llm
 
-        raise openai.error.APIError("OpenAI API returned an empty embedding")
+        raise llm.error.APIError("OpenAI API returned an empty embedding")
     return response
 
 
@@ -278,9 +278,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
             default="",
         )
         try:
-            import openai
+            import llm
 
-            values["client"] = openai.Embedding
+            values["client"] = llm.Embedding
         except ImportError:
             raise ImportError(
                 "Could not import openai python package. "
@@ -304,9 +304,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         if self.openai_api_type in ("azure", "azure_ad", "azuread"):
             openai_args["engine"] = self.deployment
         if self.openai_proxy:
-            import openai
+            import llm
 
-            openai.proxy = {
+            llm.proxy = {
                 "http": self.openai_proxy,
                 "https": self.openai_proxy,
             }  # type: ignore[assignment]  # noqa: E501
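The retry decorators above are built on tenacity. A minimal standalone sketch of the same pattern, assuming the pre-1.0 `openai` error classes; `embed_with_retries`, the retry budget of 6, and the embedding model name are illustrative, not part of this module:

```python
import logging

import openai  # pre-1.0 client exposing openai.error.*
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)


@retry(
    reraise=True,
    stop=stop_after_attempt(6),  # illustrative retry budget
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=(
        retry_if_exception_type(openai.error.Timeout)
        | retry_if_exception_type(openai.error.RateLimitError)
    ),
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retries(texts):
    """Request embeddings, retrying transient OpenAI errors with exponential backoff."""
    return openai.Embedding.create(input=texts, model="text-embedding-ada-002")
```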
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index 575d69a7..0e0280f6 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -1,10 +1,7 @@
-# from swarms.models.palm import GooglePalm
-# from swarms.models.openai import OpenAIChat #prompts
 from swarms.models.anthropic import Anthropic
 # from swarms.models.palm import GooglePalm
 from swarms.models.petals import Petals
-# from swarms.models.openai import OpenAIChat
-#prompts
+from swarms.models.chat_openai import OpenAIChat
 from swarms.models.prompts.debate import *
 from swarms.models.mistral import Mistral
\ No newline at end of file
diff --git a/swarms/models/openai.py b/swarms/models/chat_openai.py
similarity index 97%
rename from swarms/models/openai.py
rename to swarms/models/chat_openai.py
index 32ffa1db..2b2e3644 100644
--- a/swarms/models/openai.py
+++ b/swarms/models/chat_openai.py
@@ -86,14 +86,14 @@ def _create_retry_decorator(
         Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
     ] = None,
 ) -> Callable[[Any], Any]:
-    import openai
+    import llm
 
     errors = [
-        openai.error.Timeout,
-        openai.error.APIError,
-        openai.error.APIConnectionError,
-        openai.error.RateLimitError,
-        openai.error.ServiceUnavailableError,
+        llm.error.Timeout,
+        llm.error.APIError,
+        llm.error.APIConnectionError,
+        llm.error.RateLimitError,
+        llm.error.ServiceUnavailableError,
     ]
     return create_base_retry_decorator(
         error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
@@ -247,9 +247,9 @@ class BaseOpenAI(BaseLLM):
             default="",
         )
         try:
-            import openai
+            import llm
 
-            values["client"] = openai.Completion
+            values["client"] = llm.Completion
         except ImportError:
             raise ImportError(
                 "Could not import openai python package. "
@@ -494,9 +494,9 @@ class BaseOpenAI(BaseLLM):
             "organization": self.openai_organization,
         }
         if self.openai_proxy:
-            import openai
+            import llm
 
-            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
+            llm.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
         return {**openai_creds, **self._default_params}
 
     @property
@@ -765,22 +765,22 @@ class OpenAIChat(BaseLLM):
             values, "openai_organization", "OPENAI_ORGANIZATION", default=""
         )
         try:
-            import openai
+            import llm
 
-            openai.api_key = openai_api_key
+            llm.api_key = openai_api_key
             if openai_api_base:
-                openai.api_base = openai_api_base
+                llm.api_base = openai_api_base
             if openai_organization:
-                openai.organization = openai_organization
+                llm.organization = openai_organization
             if openai_proxy:
-                openai.proxy = {"http": openai_proxy, "https": openai_proxy}  # type: ignore[assignment]  # noqa: E501
+                llm.proxy = {"http": openai_proxy, "https": openai_proxy}  # type: ignore[assignment]  # noqa: E501
         except ImportError:
             raise ImportError(
                 "Could not import openai python package. "
                 "Please install it with `pip install openai`."
             )
         try:
-            values["client"] = openai.ChatCompletion
+            values["client"] = llm.ChatCompletion
         except AttributeError:
             raise ValueError(
                 "`openai` has no `ChatCompletion` attribute, this is likely "
diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/swarms/models/mistral.py b/swarms/models/mistral.py
index d822e278..79eee1e3 100644
--- a/swarms/models/mistral.py
+++ b/swarms/models/mistral.py
@@ -1,17 +1,20 @@
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from swarms.agents.message import Message
 
 
 class Mistral:
     """
     Mistral
-    model = MistralWrapper(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
+    model = Mistral(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)
     task = "My favourite condiment is"
     result = model.run(task)
     print(result)
     """
     def __init__(
-        self,
+        self,
+        ai_name: str = "Node Model Agent",
+        system_prompt: str = None,
         model_name: str ="mistralai/Mistral-7B-v0.1",
         device: str ="cuda",
         use_flash_attention: bool = False,
@@ -19,6 +22,8 @@ class Mistral:
         max_length: int = 100,
         do_sample: bool = True
     ):
+        self.ai_name = ai_name
+        self.system_prompt = system_prompt
         self.model_name = model_name
         self.device = device
         self.use_flash_attention = use_flash_attention
@@ -34,6 +39,8 @@ class Mistral:
         self.tokenizer = None
         self.load_model()
 
+        self.history = []
+
     def load_model(self):
         try:
             self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
@@ -63,4 +70,84 @@ class Mistral:
             output_text = self.tokenizer.batch_decode(generated_ids)[0]
             return output_text
         except Exception as e:
-            raise ValueError(f"Error running the model: {str(e)}")
\ No newline at end of file
+            raise ValueError(f"Error running the model: {str(e)}")
+
+    def chat(
+        self,
+        msg: str = None,
+        streaming: bool = False
+    ):
+        """
+        Run chat
+
+        Args:
+            msg (str, optional): Message to send to the agent. Defaults to None.
+            streaming (bool, optional): Whether to stream the response. Defaults to False.
+
+        Returns:
+            str: Response from the agent
+
+        Usage:
+        --------------
+        agent = Mistral()
+        agent.chat("Hello")
+
+        """
+
+        # add the user's message to the history
+        self.history.append(
+            Message(
+                "User",
+                msg
+            )
+        )
+
+        # process the message
+        try:
+            response = self.run(msg)
+
+            # add the agent's response to the history
+            self.history.append(
+                Message(
+                    "Agent",
+                    response
+                )
+            )
+
+            # if streaming is True, return a token generator instead of the full string
+            if streaming:
+                return self._stream_response(response)
+            else:
+                return response
+
+        except Exception as error:
+            error_message = f"Error processing message: {str(error)}"
+
+            # add the error to the history
+            self.history.append(
+                Message(
+                    "Agent",
+                    error_message
+                )
+            )
+
+            return error_message
+
+    def _stream_response(
+        self,
+        response: str = None
+    ):
+        """
+        Yield the response token by token (word by word)
+
+        Usage:
+        --------------
+        for token in _stream_response(response):
+            print(token)
+
+        """
+        for token in response.split():
+            yield token
+
+
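A short usage sketch of the chat and streaming interface added to `Mistral` above; it assumes a CUDA host that can download and load `mistralai/Mistral-7B-v0.1` and that the `swarms` package is installed:

```python
from swarms.models import Mistral

# Constructor arguments mirror the class docstring above.
model = Mistral(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200)

# Blocking call: returns the full response string.
reply = model.chat("My favourite condiment is")
print(reply)

# Streaming call: returns a generator yielding one whitespace-separated token at a time.
for token in model.chat("Tell me about robots.", streaming=True):
    print(token, end=" ")
```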
" "Please install it with `pip install openai`." ) try: - values["client"] = openai.ChatCompletion + values["client"] = llm.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py deleted file mode 100644 index e69de29b..00000000 diff --git a/swarms/models/mistral.py b/swarms/models/mistral.py index d822e278..79eee1e3 100644 --- a/swarms/models/mistral.py +++ b/swarms/models/mistral.py @@ -1,17 +1,20 @@ import torch from transformers import AutoModelForCausalLM, AutoTokenizer +from swarms.agents.message import Message class Mistral: """ Mistral - model = MistralWrapper(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200) + model = Mistral(device="cuda", use_flash_attention=True, temperature=0.7, max_length=200) task = "My favourite condiment is" result = model.run(task) print(result) """ def __init__( - self, + self, + ai_name: str = "Node Model Agent", + system_prompt: str = None, model_name: str ="mistralai/Mistral-7B-v0.1", device: str ="cuda", use_flash_attention: bool = False, @@ -19,6 +22,8 @@ class Mistral: max_length: int = 100, do_sample: bool = True ): + self.ai_name = ai_name + self.system_prompt = system_prompt self.model_name = model_name self.device = device self.use_flash_attention = use_flash_attention @@ -34,6 +39,8 @@ class Mistral: self.tokenizer = None self.load_model() + self.history = [] + def load_model(self): try: self.model = AutoModelForCausalLM.from_pretrained(self.model_name) @@ -63,4 +70,84 @@ class Mistral: output_text = self.tokenizer.batch_decode(generated_ids)[0] return output_text except Exception as e: - raise ValueError(f"Error running the model: {str(e)}") \ No newline at end of file + raise ValueError(f"Error running the model: {str(e)}") + + def chat( + self, + msg: str = None, + streaming: bool = False + ): + """ + Run chat + + Args: + msg (str, optional): Message to send to the agent. Defaults to None. + language (str, optional): Language to use. Defaults to None. + streaming (bool, optional): Whether to stream the response. Defaults to False. + + Returns: + str: Response from the agent + + Usage: + -------------- + agent = MultiModalAgent() + agent.chat("Hello") + + """ + + #add users message to the history + self.history.append( + Message( + "User", + msg + ) + ) + + #process msg + try: + response = self.agent.run(msg) + + #add agent's response to the history + self.history.append( + Message( + "Agent", + response + ) + ) + + #if streaming is = True + if streaming: + return self._stream_response(response) + else: + response + + except Exception as error: + error_message = f"Error processing message: {str(error)}" + + #add error to history + self.history.append( + Message( + "Agent", + error_message + ) + ) + + return error_message + + def _stream_response( + self, + response: str = None + ): + """ + Yield the response token by token (word by word) + + Usage: + -------------- + for token in _stream_response(response): + print(token) + + """ + for token in response.split(): + yield token + + diff --git a/swarms/structs/nonlinear_workflow.py b/swarms/structs/nonlinear_workflow.py new file mode 100644 index 00000000..2eebcb9b --- /dev/null +++ b/swarms/structs/nonlinear_workflow.py @@ -0,0 +1,4 @@ +from typing import List, Dict, Any, Union +from concurrent.futures import Executor, ThreadPoolExecutor, as_completed +from graphlib import TopologicalSorter +