From 11f06035b421c2296308f4005990ff45a506b43e Mon Sep 17 00:00:00 2001 From: Zack Date: Thu, 16 Nov 2023 14:00:14 -0600 Subject: [PATCH] fix merge errors Former-commit-id: a22ab586084875125403a20ec9345459b0722c44 --- swarms/models/openai_models.py | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py index 7a868655..b4de06df 100644 --- a/swarms/models/openai_models.py +++ b/swarms/models/openai_models.py @@ -103,11 +103,7 @@ def _create_retry_decorator( import openai errors = [ -<<<<<<< HEAD - openai.Timeout, -======= openai.error.Timeout, ->>>>>>> master openai.error.APIError, openai.error.APIConnectionError, openai.error.RateLimitError, @@ -124,16 +120,8 @@ def completion_with_retry( **kwargs: Any, ) -> Any: """Use tenacity to retry the completion call.""" -<<<<<<< HEAD - if is_openai_v1(): - return llm.client.create(**kwargs) - - retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) - -======= retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) ->>>>>>> master @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs) @@ -147,16 +135,8 @@ async def acompletion_with_retry( **kwargs: Any, ) -> Any: """Use tenacity to retry the async completion call.""" -<<<<<<< HEAD - if is_openai_v1(): - return await llm.async_client.create(**kwargs) - - retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) - -======= retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) ->>>>>>> master @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api @@ -618,12 +598,7 @@ class BaseOpenAI(BaseLLM): if self.openai_proxy: import openai -<<<<<<< HEAD - # TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 
'OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy})' - # openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 -======= openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 ->>>>>>> master return {**openai_creds, **self._default_params} @property