From 55f3ceb7cb25bde856303019307280ad1c72bbee Mon Sep 17 00:00:00 2001
From: Zack
Date: Thu, 16 Nov 2023 13:03:05 -0600
Subject: [PATCH] fix: Fix open ai chat completions

Former-commit-id: a6200eb5e95c25618fa085e86aa6a8608e681bf9
---
 example.py                     | 5 ++---
 swarms/models/openai_chat.py   | 2 +-
 swarms/models/openai_models.py | 5 +++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/example.py b/example.py
index 1888c663..c66c5454 100644
--- a/example.py
+++ b/example.py
@@ -1,4 +1,4 @@
-from swarms.models import OpenAIChat
+from swarms.models import OpenAI
 from swarms.structs import Flow
 from langchain.schema.messages import ChatMessage, BaseMessage
 import os
@@ -10,11 +10,10 @@ message: BaseMessage = [ ChatMessage(role="user", content='Translate the followi
 api_key = os.environ.get("OPENAI_API_KEY")
 
 # Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
-llm = OpenAIChat(
+llm = OpenAI(
     # model_name="gpt-4"
     openai_api_key=api_key,
     temperature=0.5,
-    message = message
     # max_tokens=100,
 )
 
diff --git a/swarms/models/openai_chat.py b/swarms/models/openai_chat.py
index 46057a45..d53658da 100644
--- a/swarms/models/openai_chat.py
+++ b/swarms/models/openai_chat.py
@@ -105,7 +105,7 @@ def _create_retry_decorator(
         openai.APIError,
         openai.APIConnectionError,
         openai.RateLimitError,
-        openai.error.ServiceUnavailableError,
+        openai.ServiceUnavailableError,
     ]
     return create_base_retry_decorator(
         error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py
index f420173a..615bfb0e 100644
--- a/swarms/models/openai_models.py
+++ b/swarms/models/openai_models.py
@@ -93,7 +93,7 @@ def _create_retry_decorator(
     import openai
 
     errors = [
-        openai.error.Timeout,
+        openai.Timeout,
         openai.error.APIError,
         openai.error.APIConnectionError,
         openai.error.RateLimitError,
@@ -594,7 +594,8 @@ class BaseOpenAI(BaseLLM):
         if self.openai_proxy:
             import openai
 
-            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
+            # TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy})'
+            # openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
         return {**openai_creds, **self._default_params}
 
     @property
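
Note (not part of the patch): the TODO added in the last hunk says the proxy can no longer be set module-wide and has to be supplied when the client is instantiated. A minimal sketch of what that could look like under the openai>=1.x API, assuming httpx is installed; the OPENAI_PROXY environment variable and surrounding names are illustrative only and do not appear in the swarms codebase:

import os

import httpx
from openai import OpenAI

# Hypothetical: read the proxy URL from the environment, e.g. "http://localhost:8080".
openai_proxy = os.environ.get("OPENAI_PROXY")

# The v1 client does not read a module-level `openai.proxy`; proxy settings are
# configured on the HTTP transport passed to the client instead.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    http_client=httpx.Client(proxies=openai_proxy) if openai_proxy else None,
)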