Sync main with master; fix merge errors

pull/298/head
Zack 1 year ago
parent c4c7f0d847
commit 74cf494e23

@@ -46,12 +46,4 @@ jobs:
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
<<<<<<< HEAD
<<<<<<< HEAD
poetry publish
=======
poetry publish
>>>>>>> model-flow
=======
poetry publish
>>>>>>> master

.gitignore (vendored): 19 changed lines

@@ -9,6 +9,10 @@ video/
dataframe/
static/generated
<<<<<<< HEAD
=======
runs
>>>>>>> master
swarms/__pycache__
venv
.DS_Store
@@ -19,6 +23,10 @@ swarms/agents/.DS_Store
_build
stderr_log.txt
<<<<<<< HEAD
=======
.vscode
>>>>>>> master
.DS_STORE
# Byte-compiled / optimized / DLL files
__pycache__/
@@ -34,8 +42,12 @@ error.txt
errors.txt
<<<<<<< HEAD
models/
=======
Autonomous-Agent-XYZ1B_state.json
>>>>>>> master
# Distribution / packaging
.Python
build/
@@ -157,7 +169,10 @@ venv/
ENV/
env.bak/
venv.bak/
<<<<<<< HEAD
secret_keys.sh
=======
>>>>>>> master
# Spyder project settings
.spyderproject
@@ -189,3 +204,7 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
<<<<<<< HEAD
=======
.vscode/settings.json
>>>>>>> master

Binary file not shown. (Before: 1.1 MiB. After: 132 B.)

Binary file not shown. (Before: 8.0 MiB. After: 132 B.)

@@ -27,14 +27,10 @@ from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
<<<<<<< HEAD
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
=======
from langchain.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
)
>>>>>>> master
from langchain.utils.utils import build_extra_kwargs
@@ -51,13 +47,9 @@ def is_openai_v1() -> bool:
def update_token_usage(
<<<<<<< HEAD
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
=======
keys: Set[str],
response: Dict[str, Any],
token_usage: Dict[str, Any],
>>>>>>> master
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
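The hunk above keeps the multi-line signature of update_token_usage but is cut off before the accumulation loop. For orientation, a minimal standalone sketch of how such a helper merges OpenAI usage counts follows; the loop body is an assumption based on the visible intersection line, not code taken from this diff.

from typing import Any, Dict, Set

def update_token_usage_sketch(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Accumulate token counts from an OpenAI-style response (sketch)."""
    # Only keys we care about and that the response actually reports.
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        # Assumed behaviour: keep a running total across batched calls.
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]

# Example: two responses contribute to one running total.
usage: Dict[str, Any] = {}
update_token_usage_sketch({"total_tokens"}, {"usage": {"total_tokens": 12}}, usage)
update_token_usage_sketch({"total_tokens"}, {"usage": {"total_tokens": 30}}, usage)
print(usage)  # {'total_tokens': 42}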
@@ -75,30 +67,16 @@ def _stream_response_to_generation_chunk(
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
<<<<<<< HEAD
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
=======
finish_reason=stream_response["choices"][0].get(
"finish_reason", None
),
logprobs=stream_response["choices"][0].get(
"logprobs", None
),
>>>>>>> master
),
)
<<<<<<< HEAD
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
=======
def _update_response(
response: Dict[str, Any], stream_response: Dict[str, Any]
) -> None:
@@ -112,7 +90,6 @@ def _update_response(
response["choices"][0]["logprobs"] = stream_response["choices"][
0
]["logprobs"]
>>>>>>> master
def _streaming_response_template() -> Dict[str, Any]:
@@ -127,26 +104,6 @@ def _streaming_response_template() -> Dict[str, Any]:
}
<<<<<<< HEAD
# def _create_retry_decorator(
# llm: Union[BaseOpenAI, OpenAIChat],
# run_manager: Optional[
# Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
# ] = None,
# ) -> Callable[[Any], Any]:
# import openai
# errors = [
# openai.Timeout,
# openai.APIError,
# openai.error.APIConnectionError,
# openai.error.RateLimitError,
# openai.error.ServiceUnavailableError,
# ]
# return create_base_retry_decorator(
# error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
# )
=======
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
@@ -167,7 +124,6 @@ def _create_retry_decorator(
max_retries=llm.max_retries,
run_manager=run_manager,
)
>>>>>>> master
def completion_with_retry(
@@ -176,17 +132,11 @@ def completion_with_retry(
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
<<<<<<< HEAD
# retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
# @retry_decorator
=======
retry_decorator = _create_retry_decorator(
llm, run_manager=run_manager
)
@retry_decorator
>>>>>>> master
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
@@ -199,17 +149,11 @@ async def acompletion_with_retry(
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
<<<<<<< HEAD
# retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
# @retry_decorator
=======
retry_decorator = _create_retry_decorator(
llm, run_manager=run_manager
)
@retry_decorator
>>>>>>> master
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
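completion_with_retry and acompletion_with_retry share the same shape: build a retry decorator from the LLM's max_retries and wrap a thin closure around the client call. A self-contained sketch of that pattern using tenacity directly is shown below; the backoff settings, the TransientAPIError placeholder, and the fake client argument are illustrative assumptions, since the real decorator comes from langchain's create_base_retry_decorator.

from typing import Any, Callable
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

class TransientAPIError(Exception):
    """Stand-in for the OpenAI error types listed in _create_retry_decorator."""

def _create_retry_decorator_sketch(max_retries: int) -> Callable[[Any], Any]:
    # Retry on the transient error type, with exponential backoff between attempts.
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=10),
        retry=retry_if_exception_type(TransientAPIError),
    )

def completion_with_retry_sketch(client: Any, max_retries: int = 6, **kwargs: Any) -> Any:
    """Wrap a single client call in the retry decorator, mirroring the diffed code."""
    retry_decorator = _create_retry_decorator_sketch(max_retries)

    @retry_decorator
    def _completion_with_retry(**inner_kwargs: Any) -> Any:
        return client.create(**inner_kwargs)

    return _completion_with_retry(**kwargs)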
@@ -231,13 +175,9 @@ class BaseOpenAI(BaseLLM):
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization != "":
<<<<<<< HEAD
attributes["openai_organization"] = self.openai_organization
=======
attributes["openai_organization"] = ( attributes["openai_organization"] = (
self.openai_organization self.openai_organization
) )
>>>>>>> master
if self.openai_proxy != "": if self.openai_proxy != "":
attributes["openai_proxy"] = self.openai_proxy attributes["openai_proxy"] = self.openai_proxy
@@ -276,11 +216,6 @@ class BaseOpenAI(BaseLLM):
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
<<<<<<< HEAD
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
=======
request_timeout: Optional[Union[float, Tuple[float, float]]] = (
None
)
@@ -288,7 +223,6 @@ class BaseOpenAI(BaseLLM):
logit_bias: Optional[Dict[str, float]] = Field(
default_factory=dict
)
>>>>>>> master
"""Adjust the probability of specific tokens being generated.""" """Adjust the probability of specific tokens being generated."""
max_retries: int = 6 max_retries: int = 6
"""Maximum number of retries to make when generating.""" """Maximum number of retries to make when generating."""
@@ -365,13 +299,9 @@ class BaseOpenAI(BaseLLM):
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
<<<<<<< HEAD
raise ValueError("Cannot stream results when best_of > 1.")
=======
raise ValueError(
"Cannot stream results when best_of > 1."
)
>>>>>>> master
return values
@property
@@ -403,13 +333,9 @@ class BaseOpenAI(BaseLLM):
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
<<<<<<< HEAD
self.get_sub_prompts(params, [prompt], stop) # this mutates params
=======
self.get_sub_prompts(
params, [prompt], stop
) # this mutates params
>>>>>>> master
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
@@ -420,17 +346,11 @@ class BaseOpenAI(BaseLLM):
chunk.text,
chunk=chunk,
verbose=self.verbose,
<<<<<<< HEAD
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
=======
logprobs=(
chunk.generation_info["logprobs"]
if chunk.generation_info
else None
),
>>>>>>> master
)
async def _astream(
@@ -441,13 +361,9 @@ class BaseOpenAI(BaseLLM):
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
<<<<<<< HEAD
self.get_sub_prompts(params, [prompt], stop) # this mutate params
=======
self.get_sub_prompts(
params, [prompt], stop
) # this mutate params
>>>>>>> master
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
@@ -458,17 +374,11 @@ class BaseOpenAI(BaseLLM):
chunk.text,
chunk=chunk,
verbose=self.verbose,
<<<<<<< HEAD
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
=======
logprobs=(
chunk.generation_info["logprobs"]
if chunk.generation_info
else None
),
>>>>>>> master
)
def _generate(
@@ -504,12 +414,6 @@ class BaseOpenAI(BaseLLM):
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
<<<<<<< HEAD
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
=======
raise ValueError(
"Cannot stream results with multiple prompts."
)
@@ -518,7 +422,6 @@ class BaseOpenAI(BaseLLM):
for chunk in self._stream(
_prompts[0], stop, run_manager, **kwargs
):
>>>>>>> master
if generation is None:
generation = chunk
else:
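The streaming branch above folds chunks into a single generation; the hunk ends at the else branch, which in this pattern typically adds the next chunk onto the running generation. A minimal sketch of that accumulation, assuming GenerationChunk's support for + (texts concatenate, generation_info merges):

from typing import Iterable, Optional
from langchain.schema.output import GenerationChunk

def collect_stream_sketch(chunks: Iterable[GenerationChunk]) -> Optional[GenerationChunk]:
    """Fold streamed chunks into one generation, mirroring the loop in _generate."""
    generation: Optional[GenerationChunk] = None
    for chunk in chunks:
        if generation is None:
            generation = chunk
        else:
            # Assumed: GenerationChunk.__add__ concatenates text and merges info.
            generation += chunk
    return generation

# Illustrative usage with hand-made chunks rather than a live OpenAI stream.
parts = [GenerationChunk(text="Hello"), GenerationChunk(text=", world")]
result = collect_stream_sketch(parts)
print(result.text if result else "")  # "Hello, world"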
@@ -527,14 +430,6 @@ class BaseOpenAI(BaseLLM):
choices.append(
{
"text": generation.text,
<<<<<<< HEAD
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
=======
"finish_reason": ( "finish_reason": (
generation.generation_info.get( generation.generation_info.get(
"finish_reason" "finish_reason"
@@ -547,19 +442,14 @@ class BaseOpenAI(BaseLLM):
if generation.generation_info
else None
),
>>>>>>> master
}
)
else:
response = completion_with_retry(
<<<<<<< HEAD
self, prompt=_prompts, run_manager=run_manager, **params
=======
self,
prompt=_prompts,
run_manager=run_manager,
**params,
>>>>>>> master
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
@@ -584,13 +474,9 @@ class BaseOpenAI(BaseLLM):
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
<<<<<<< HEAD
raise ValueError("Cannot stream results with multiple prompts.")
=======
raise ValueError(
"Cannot stream results with multiple prompts."
)
>>>>>>> master
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
@@ -604,14 +490,6 @@ class BaseOpenAI(BaseLLM):
choices.append(
{
"text": generation.text,
<<<<<<< HEAD
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
=======
"finish_reason": ( "finish_reason": (
generation.generation_info.get( generation.generation_info.get(
"finish_reason" "finish_reason"
@@ -624,19 +502,14 @@ class BaseOpenAI(BaseLLM):
if generation.generation_info
else None
),
>>>>>>> master
}
)
else:
response = await acompletion_with_retry(
<<<<<<< HEAD
self, prompt=_prompts, run_manager=run_manager, **params
=======
self,
prompt=_prompts,
run_manager=run_manager,
**params,
>>>>>>> master
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
@@ -651,30 +524,20 @@ class BaseOpenAI(BaseLLM):
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
<<<<<<< HEAD
raise ValueError("`stop` found in both the input and default params.")
=======
raise ValueError(
"`stop` found in both the input and default"
" params."
)
>>>>>>> master
params["stop"] = stop params["stop"] = stop
if params["max_tokens"] == -1: if params["max_tokens"] == -1:
if len(prompts) != 1: if len(prompts) != 1:
raise ValueError( raise ValueError(
<<<<<<< HEAD
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
=======
"max_tokens set to -1 not supported for multiple" "max_tokens set to -1 not supported for multiple"
" inputs." " inputs."
) )
params["max_tokens"] = self.max_tokens_for_prompt( params["max_tokens"] = self.max_tokens_for_prompt(
prompts[0] prompts[0]
) )
>>>>>>> master
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
@@ -682,14 +545,10 @@ class BaseOpenAI(BaseLLM):
return sub_prompts
def create_llm_result(
<<<<<<< HEAD
self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
=======
self,
choices: Any,
prompts: List[str],
token_usage: Dict[str, int],
>>>>>>> master
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
@@ -707,10 +566,6 @@ class BaseOpenAI(BaseLLM):
for choice in sub_choices
]
)
<<<<<<< HEAD
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return LLMResult(generations=generations, llm_output=llm_output)
=======
llm_output = {
"token_usage": token_usage,
"model_name": self.model_name,
@@ -718,7 +573,6 @@ class BaseOpenAI(BaseLLM):
return LLMResult(
generations=generations, llm_output=llm_output
)
>>>>>>> master
@property
def _invocation_params(self) -> Dict[str, Any]:
@@ -726,37 +580,21 @@ class BaseOpenAI(BaseLLM):
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
<<<<<<< HEAD
"organization": self.openai_organization,
=======
"organization": self.openai_organization, "organization": self.openai_organization,
>>>>>>> master
}
if self.openai_proxy:
import openai
<<<<<<< HEAD
# raise Exception("The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g.",
# 'OpenAI(proxy={
# "http": self.openai_proxy,
# "https": self.openai_proxy,
# })'") # type: ignore[assignment] # noqa: E501
=======
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
>>>>>>> master
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
<<<<<<< HEAD
return {**{"model_name": self.model_name}, **self._default_params}
=======
return {
**{"model_name": self.model_name},
**self._default_params,
}
>>>>>>> master
@property
def _llm_type(self) -> str:
@@ -772,29 +610,19 @@ class BaseOpenAI(BaseLLM):
import tiktoken
except ImportError:
raise ImportError(
<<<<<<< HEAD
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
=======
"Could not import tiktoken python package. This is" "Could not import tiktoken python package. This is"
" needed in order to calculate get_num_tokens. Please" " needed in order to calculate get_num_tokens. Please"
" install it with `pip install tiktoken`." " install it with `pip install tiktoken`."
>>>>>>> master
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
<<<<<<< HEAD
logger.warning("Warning: model not found. Using cl100k_base encoding.")
=======
logger.warning(
"Warning: model not found. Using cl100k_base"
" encoding."
)
>>>>>>> master
model = "cl100k_base" model = "cl100k_base"
enc = tiktoken.get_encoding(model) enc = tiktoken.get_encoding(model)
@@ -855,14 +683,9 @@ class BaseOpenAI(BaseLLM):
if context_size is None:
raise ValueError(
<<<<<<< HEAD
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
=======
f"Unknown model: {modelname}. Please provide a valid" f"Unknown model: {modelname}. Please provide a valid"
" OpenAI model name.Known models are: " " OpenAI model name.Known models are: "
+ ", ".join(model_token_mapping.keys()) + ", ".join(model_token_mapping.keys())
>>>>>>> master
)
return context_size
@@ -908,14 +731,10 @@ class OpenAI(BaseOpenAI):
@property
def _invocation_params(self) -> Dict[str, Any]:
<<<<<<< HEAD
return {**{"model": self.model_name}, **super()._invocation_params}
=======
return {
**{"model": self.model_name},
**super()._invocation_params,
}
>>>>>>> master
class AzureOpenAI(BaseOpenAI):
@@ -989,8 +808,6 @@ class OpenAIChat(BaseLLM):
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
<<<<<<< HEAD
=======
Args:
model_name: The model name to use.
@@ -1006,7 +823,6 @@ class OpenAIChat(BaseLLM):
>>>>>>> master
Example:
.. code-block:: python
@@ -1015,20 +831,10 @@ class OpenAIChat(BaseLLM):
"""
client: Any #: :meta private:
<<<<<<< HEAD
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
# to support explicit proxy for OpenAI
=======
model_name: str = "gpt-3.5-turbo-1106" model_name: str = "gpt-3.5-turbo-1106"
model_kwargs: Dict[str, Any] = Field(default_factory=dict) model_kwargs: Dict[str, Any] = Field(default_factory=dict)
openai_api_key: Optional[str] = None openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None openai_api_base: Optional[str] = None
>>>>>>> master
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
@@ -1044,25 +850,17 @@ class OpenAIChat(BaseLLM):
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
<<<<<<< HEAD
all_required_field_names = {field.alias for field in cls.__fields__.values()}
=======
all_required_field_names = {
field.alias for field in cls.__fields__.values()
}
>>>>>>> master
extra = values.get("model_kwargs", {}) extra = values.get("model_kwargs", {})
for field_name in list(values): for field_name in list(values):
if field_name not in all_required_field_names: if field_name not in all_required_field_names:
if field_name in extra: if field_name in extra:
<<<<<<< HEAD
raise ValueError(f"Found {field_name} supplied twice.")
=======
raise ValueError(
f"Found {field_name} supplied twice."
)
>>>>>>> master
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@@ -1086,25 +884,14 @@ class OpenAIChat(BaseLLM):
default="",
)
openai_organization = get_from_dict_or_env(
<<<<<<< HEAD
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
=======
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
>>>>>>> master
)
try:
import openai
<<<<<<< HEAD
if openai_api_base:
raise Exception("The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(api_base=openai_api_base)'")
if openai_organization:
raise Exception("The 'openai.organization' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(organization=openai_organization)'")
=======
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
@@ -1112,7 +899,6 @@ class OpenAIChat(BaseLLM):
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
>>>>>>> master
except ImportError:
raise ImportError(
"Could not import openai python package. "
@@ -1122,16 +908,10 @@ class OpenAIChat(BaseLLM):
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
<<<<<<< HEAD
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
=======
"`openai` has no `ChatCompletion` attribute, this is" "`openai` has no `ChatCompletion` attribute, this is"
" likely due to an old version of the openai package." " likely due to an old version of the openai package."
" Try upgrading it with `pip install --upgrade" " Try upgrading it with `pip install --upgrade"
" openai`." " openai`."
>>>>>>> master
)
return values
@@ -1145,15 +925,6 @@ class OpenAIChat(BaseLLM):
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
<<<<<<< HEAD
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
=======
"OpenAIChat currently only supports single prompt," "OpenAIChat currently only supports single prompt,"
f" got {prompts}" f" got {prompts}"
) )
@@ -1170,7 +941,6 @@ class OpenAIChat(BaseLLM):
"`stop` found in both the input and default"
" params."
)
>>>>>>> master
params["stop"] = stop params["stop"] = stop
if params.get("max_tokens") == -1: if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit # for ChatGPT api, omitting max_tokens is equivalent to having no limit
@@ -1189,13 +959,9 @@ class OpenAIChat(BaseLLM):
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
<<<<<<< HEAD
token = stream_resp["choices"][0]["delta"].get("content", "")
=======
token = stream_resp["choices"][0]["delta"].get( token = stream_resp["choices"][0]["delta"].get(
"content", "" "content", ""
) )
>>>>>>> master
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
@@ -1213,13 +979,9 @@ class OpenAIChat(BaseLLM):
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
<<<<<<< HEAD
token = stream_resp["choices"][0]["delta"].get("content", "")
=======
token = stream_resp["choices"][0]["delta"].get( token = stream_resp["choices"][0]["delta"].get(
"content", "" "content", ""
) )
>>>>>>> master
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
@@ -1234,13 +996,9 @@ class OpenAIChat(BaseLLM):
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
<<<<<<< HEAD
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
=======
for chunk in self._stream(
prompts[0], stop, run_manager, **kwargs
):
>>>>>>> master
if generation is None:
generation = chunk
else:
@@ -1259,9 +1017,6 @@ class OpenAIChat(BaseLLM):
}
return LLMResult(
generations=[
<<<<<<< HEAD
[Generation(text=full_response["choices"][0]["message"]["content"])]
=======
[
Generation(
text=full_response["choices"][0]["message"][
@@ -1269,7 +1024,6 @@ class OpenAIChat(BaseLLM):
]
)
]
>>>>>>> master
],
llm_output=llm_output,
)
@@ -1283,13 +1037,9 @@ class OpenAIChat(BaseLLM):
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
<<<<<<< HEAD
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
=======
async for chunk in self._astream(
prompts[0], stop, run_manager, **kwargs
):
>>>>>>> master
if generation is None:
generation = chunk
else:
@@ -1308,9 +1058,6 @@ class OpenAIChat(BaseLLM):
}
return LLMResult(
generations=[
<<<<<<< HEAD
[Generation(text=full_response["choices"][0]["message"]["content"])]
=======
[
Generation(
text=full_response["choices"][0]["message"][
@@ -1318,7 +1065,6 @@ class OpenAIChat(BaseLLM):
]
)
]
>>>>>>> master
],
llm_output=llm_output,
)
@@ -1326,14 +1072,10 @@ class OpenAIChat(BaseLLM):
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
<<<<<<< HEAD
return {**{"model_name": self.model_name}, **self._default_params}
=======
return {
**{"model_name": self.model_name},
**self._default_params,
}
>>>>>>> master
@property
def _llm_type(self) -> str:
@@ -1349,15 +1091,9 @@ class OpenAIChat(BaseLLM):
import tiktoken
except ImportError:
raise ImportError(
<<<<<<< HEAD
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
=======
"Could not import tiktoken python package. This is" "Could not import tiktoken python package. This is"
" needed in order to calculate get_num_tokens. Please" " needed in order to calculate get_num_tokens. Please"
" install it with `pip install tiktoken`." " install it with `pip install tiktoken`."
>>>>>>> master
)
enc = tiktoken.encoding_for_model(self.model_name)
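Both get_num_tokens paths in this file resolve a tiktoken encoding for the configured model and fall back to cl100k_base when the model name is unknown. A self-contained approximation of that flow follows; the final encode step is not visible in these hunks, so treat it as an assumption.

from typing import List

def get_token_ids_sketch(text: str, model_name: str) -> List[int]:
    """Tokenize text with tiktoken, falling back to cl100k_base for unknown models."""
    try:
        import tiktoken
    except ImportError:
        raise ImportError(
            "Could not import tiktoken python package. "
            "Please install it with `pip install tiktoken`."
        )
    try:
        enc = tiktoken.encoding_for_model(model_name)
    except KeyError:
        # Unknown model name: fall back to the generic cl100k_base encoding.
        enc = tiktoken.get_encoding("cl100k_base")
    return enc.encode(text)

# Example: count tokens for a prompt the way get_num_tokens would.
print(len(get_token_ids_sketch("hello world", "gpt-3.5-turbo")))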
