sync main with master; fix merge errors

Branch: pull/298/head
Author: Zack (1 year ago)
Parent: c4c7f0d847
Commit: 74cf494e23

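The hunks below still show Git conflict markers (<<<<<<< HEAD / ======= / >>>>>>> master) in the release workflow, .gitignore, and the vendored OpenAI model wrapper. As a quick sanity check after a sync like this, a scan along the following lines can confirm no markers remain in tracked files; this is a minimal standalone sketch, not part of the repository, and the extension filter is an assumption.

import pathlib

# Report any leftover merge conflict markers in the working tree.
MARKERS = ("<<<<<<<", "=======", ">>>>>>>")

for path in pathlib.Path(".").rglob("*"):
    # The "" suffix covers extensionless dotfiles such as .gitignore.
    if path.is_file() and path.suffix in {".py", ".yml", ".yaml", ""}:
        for lineno, line in enumerate(path.read_text(errors="ignore").splitlines(), start=1):
            if line.startswith(MARKERS):
                print(f"{path}:{lineno}: {line.strip()}")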
@@ -46,12 +46,4 @@ jobs:
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
<<<<<<< HEAD
<<<<<<< HEAD
poetry publish
=======
poetry publish
>>>>>>> model-flow
=======
poetry publish
>>>>>>> master

.gitignore (vendored): 19 lines changed

@@ -9,6 +9,10 @@ video/
dataframe/
static/generated
<<<<<<< HEAD
=======
runs
>>>>>>> master
swarms/__pycache__
venv
.DS_Store
@@ -19,6 +23,10 @@ swarms/agents/.DS_Store
_build
stderr_log.txt
<<<<<<< HEAD
=======
.vscode
>>>>>>> master
.DS_STORE
# Byte-compiled / optimized / DLL files
__pycache__/
@@ -34,8 +42,12 @@ error.txt
errors.txt
<<<<<<< HEAD
models/
=======
Autonomous-Agent-XYZ1B_state.json
>>>>>>> master
# Distribution / packaging
.Python
build/
@@ -157,7 +169,10 @@ venv/
ENV/
env.bak/
venv.bak/
<<<<<<< HEAD
secret_keys.sh
=======
>>>>>>> master
# Spyder project settings
.spyderproject
@@ -189,3 +204,7 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
<<<<<<< HEAD
=======
.vscode/settings.json
>>>>>>> master

Binary file not shown (image changed: 1.1 MiB before, 132 B after).

Binary file not shown (image changed: 8.0 MiB before, 132 B after).

@@ -27,14 +27,10 @@ from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
<<<<<<< HEAD
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
=======
from langchain.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
)
>>>>>>> master
from langchain.utils.utils import build_extra_kwargs
@@ -51,13 +47,9 @@ def is_openai_v1() -> bool:
def update_token_usage(
<<<<<<< HEAD
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
=======
keys: Set[str],
response: Dict[str, Any],
token_usage: Dict[str, Any],
>>>>>>> master
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
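For context, update_token_usage just accumulates the usage counters returned with each batched completion call; the two sides of the conflict differ only in argument wrapping. A self-contained sketch of that bookkeeping, with illustrative numbers:

from typing import Any, Dict, Set

def merge_usage(keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]) -> None:
    # Add each reported counter to the running total, creating it on first use.
    for key in keys.intersection(response["usage"]):
        token_usage[key] = token_usage.get(key, 0) + response["usage"][key]

total: Dict[str, Any] = {}
merge_usage({"prompt_tokens", "completion_tokens"}, {"usage": {"prompt_tokens": 12, "completion_tokens": 30}}, total)
merge_usage({"prompt_tokens", "completion_tokens"}, {"usage": {"prompt_tokens": 7, "completion_tokens": 11}}, total)
print(total)  # prompt_tokens == 19, completion_tokens == 41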
@@ -75,30 +67,16 @@ def _stream_response_to_generation_chunk(
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
<<<<<<< HEAD
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
=======
finish_reason=stream_response["choices"][0].get(
"finish_reason", None
),
logprobs=stream_response["choices"][0].get(
"logprobs", None
),
>>>>>>> master
),
)
<<<<<<< HEAD
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
=======
def _update_response(
response: Dict[str, Any], stream_response: Dict[str, Any]
) -> None:
@@ -112,7 +90,6 @@ def _update_response(
response["choices"][0]["logprobs"] = stream_response["choices"][
0
]["logprobs"]
>>>>>>> master
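Both variants of _update_response above implement the same chunk-merging step; only the line wrapping differs. Shown standalone with illustrative data:

# Fold one streamed chunk into the accumulated response dict.
response = {"choices": [{"text": "Hello", "finish_reason": None, "logprobs": None}]}
stream_response = {"choices": [{"text": ", world", "finish_reason": "stop", "logprobs": None}]}

response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get("finish_reason", None)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
print(response["choices"][0])  # {'text': 'Hello, world', 'finish_reason': 'stop', 'logprobs': None}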
def _streaming_response_template() -> Dict[str, Any]:
@@ -127,26 +104,6 @@ def _streaming_response_template() -> Dict[str, Any]:
}
<<<<<<< HEAD
# def _create_retry_decorator(
# llm: Union[BaseOpenAI, OpenAIChat],
# run_manager: Optional[
# Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
# ] = None,
# ) -> Callable[[Any], Any]:
# import openai
# errors = [
# openai.Timeout,
# openai.APIError,
# openai.error.APIConnectionError,
# openai.error.RateLimitError,
# openai.error.ServiceUnavailableError,
# ]
# return create_base_retry_decorator(
# error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
# )
=======
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
@@ -167,7 +124,6 @@ def _create_retry_decorator(
max_retries=llm.max_retries,
run_manager=run_manager,
)
>>>>>>> master
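The HEAD side comments the retry decorator out while master keeps it; create_base_retry_decorator is a thin wrapper over tenacity. A rough standalone equivalent, with placeholder exception types and an assumed backoff policy rather than the exact settings used by the library:

from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

@retry(
    retry=retry_if_exception_type((TimeoutError, ConnectionError)),  # stand-ins for the openai error classes listed above
    stop=stop_after_attempt(6),                                      # mirrors max_retries = 6
    wait=wait_exponential(multiplier=1, min=1, max=10),              # backoff values are assumptions
)
def call_completion(**kwargs):
    # Replace with the real call, e.g. llm.client.create(**kwargs).
    return {"choices": []}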
def completion_with_retry(
@@ -176,17 +132,11 @@ def completion_with_retry(
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
<<<<<<< HEAD
# retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
# @retry_decorator
=======
retry_decorator = _create_retry_decorator(
llm, run_manager=run_manager
)
@retry_decorator
>>>>>>> master
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
@@ -199,17 +149,11 @@ async def acompletion_with_retry(
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
<<<<<<< HEAD
# retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
# @retry_decorator
=======
retry_decorator = _create_retry_decorator(
llm, run_manager=run_manager
)
@retry_decorator
>>>>>>> master
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
@@ -231,13 +175,9 @@ class BaseOpenAI(BaseLLM):
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization != "":
<<<<<<< HEAD
attributes["openai_organization"] = self.openai_organization
=======
attributes["openai_organization"] = (
self.openai_organization
)
>>>>>>> master
if self.openai_proxy != "":
attributes["openai_proxy"] = self.openai_proxy
@@ -276,11 +216,6 @@ class BaseOpenAI(BaseLLM):
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
<<<<<<< HEAD
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
=======
request_timeout: Optional[Union[float, Tuple[float, float]]] = (
None
)
@@ -288,7 +223,6 @@ class BaseOpenAI(BaseLLM):
logit_bias: Optional[Dict[str, float]] = Field(
default_factory=dict
)
>>>>>>> master
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
@@ -365,13 +299,9 @@ class BaseOpenAI(BaseLLM):
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
<<<<<<< HEAD
raise ValueError("Cannot stream results when best_of > 1.")
=======
raise ValueError(
"Cannot stream results when best_of > 1."
)
>>>>>>> master
return values
@property
@@ -403,13 +333,9 @@ class BaseOpenAI(BaseLLM):
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
<<<<<<< HEAD
self.get_sub_prompts(params, [prompt], stop) # this mutates params
=======
self.get_sub_prompts(
params, [prompt], stop
) # this mutates params
>>>>>>> master
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
@@ -420,17 +346,11 @@ class BaseOpenAI(BaseLLM):
chunk.text,
chunk=chunk,
verbose=self.verbose,
<<<<<<< HEAD
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
=======
logprobs=(
chunk.generation_info["logprobs"]
if chunk.generation_info
else None
),
>>>>>>> master
)
async def _astream(
@@ -441,13 +361,9 @@ class BaseOpenAI(BaseLLM):
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
<<<<<<< HEAD
self.get_sub_prompts(params, [prompt], stop) # this mutate params
=======
self.get_sub_prompts(
params, [prompt], stop
) # this mutate params
>>>>>>> master
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
@@ -458,17 +374,11 @@ class BaseOpenAI(BaseLLM):
chunk.text,
chunk=chunk,
verbose=self.verbose,
<<<<<<< HEAD
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
=======
logprobs=(
chunk.generation_info["logprobs"]
if chunk.generation_info
else None
),
>>>>>>> master
)
def _generate(
@@ -504,12 +414,6 @@ class BaseOpenAI(BaseLLM):
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
<<<<<<< HEAD
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
=======
raise ValueError(
"Cannot stream results with multiple prompts."
)
@@ -518,7 +422,6 @@ class BaseOpenAI(BaseLLM):
for chunk in self._stream(
_prompts[0], stop, run_manager, **kwargs
):
>>>>>>> master
if generation is None:
generation = chunk
else:
@@ -527,14 +430,6 @@ class BaseOpenAI(BaseLLM):
choices.append(
{
"text": generation.text,
<<<<<<< HEAD
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
=======
"finish_reason": (
generation.generation_info.get(
"finish_reason"
@@ -547,19 +442,14 @@ class BaseOpenAI(BaseLLM):
if generation.generation_info
else None
),
>>>>>>> master
}
)
else:
response = completion_with_retry(
<<<<<<< HEAD
self, prompt=_prompts, run_manager=run_manager, **params
=======
self,
prompt=_prompts,
run_manager=run_manager,
**params,
>>>>>>> master
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
@@ -584,13 +474,9 @@ class BaseOpenAI(BaseLLM):
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
<<<<<<< HEAD
raise ValueError("Cannot stream results with multiple prompts.")
=======
raise ValueError(
"Cannot stream results with multiple prompts."
)
>>>>>>> master
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
@@ -604,14 +490,6 @@ class BaseOpenAI(BaseLLM):
choices.append(
{
"text": generation.text,
<<<<<<< HEAD
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
=======
"finish_reason": (
generation.generation_info.get(
"finish_reason"
@@ -624,19 +502,14 @@ class BaseOpenAI(BaseLLM):
if generation.generation_info
else None
),
>>>>>>> master
}
)
else:
response = await acompletion_with_retry(
<<<<<<< HEAD
self, prompt=_prompts, run_manager=run_manager, **params
=======
self,
prompt=_prompts,
run_manager=run_manager,
**params,
>>>>>>> master
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
@@ -651,30 +524,20 @@ class BaseOpenAI(BaseLLM):
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
<<<<<<< HEAD
raise ValueError("`stop` found in both the input and default params.")
=======
raise ValueError(
"`stop` found in both the input and default"
" params."
)
>>>>>>> master
params["stop"] = stop
if params["max_tokens"] == -1:
if len(prompts) != 1:
raise ValueError(
<<<<<<< HEAD
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
=======
"max_tokens set to -1 not supported for multiple"
" inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(
prompts[0]
)
>>>>>>> master
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
@@ -682,14 +545,10 @@ class BaseOpenAI(BaseLLM):
return sub_prompts
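get_sub_prompts, shown partially above, batches the prompt list by batch_size before calling the API. The slicing behaves like this, with illustrative values (the class default above is 20):

prompts = ["p1", "p2", "p3", "p4", "p5"]
batch_size = 2
sub_prompts = [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]
print(sub_prompts)  # [['p1', 'p2'], ['p3', 'p4'], ['p5']]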
def create_llm_result(
<<<<<<< HEAD
self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
=======
self,
choices: Any,
prompts: List[str],
token_usage: Dict[str, int],
>>>>>>> master
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
@@ -707,10 +566,6 @@ class BaseOpenAI(BaseLLM):
for choice in sub_choices
]
)
<<<<<<< HEAD
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
return LLMResult(generations=generations, llm_output=llm_output)
=======
llm_output = {
"token_usage": token_usage,
"model_name": self.model_name,
@@ -718,7 +573,6 @@ class BaseOpenAI(BaseLLM):
return LLMResult(
generations=generations, llm_output=llm_output
)
>>>>>>> master
@property
def _invocation_params(self) -> Dict[str, Any]:
@@ -726,37 +580,21 @@ class BaseOpenAI(BaseLLM):
openai_creds: Dict[str, Any] = {
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
<<<<<<< HEAD
"organization": self.openai_organization,
=======
"organization": self.openai_organization,
>>>>>>> master
}
if self.openai_proxy:
import openai
<<<<<<< HEAD
# raise Exception("The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g.",
# 'OpenAI(proxy={
# "http": self.openai_proxy,
# "https": self.openai_proxy,
# })'") # type: ignore[assignment] # noqa: E501
=======
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
>>>>>>> master
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
<<<<<<< HEAD
return {**{"model_name": self.model_name}, **self._default_params}
=======
return {
**{"model_name": self.model_name},
**self._default_params,
}
>>>>>>> master
@property
def _llm_type(self) -> str:
@@ -772,29 +610,19 @@ class BaseOpenAI(BaseLLM):
import tiktoken
except ImportError:
raise ImportError(
<<<<<<< HEAD
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
=======
"Could not import tiktoken python package. This is"
" needed in order to calculate get_num_tokens. Please"
" install it with `pip install tiktoken`."
>>>>>>> master
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
<<<<<<< HEAD
logger.warning("Warning: model not found. Using cl100k_base encoding.")
=======
logger.warning(
"Warning: model not found. Using cl100k_base"
" encoding."
)
>>>>>>> master
model = "cl100k_base"
enc = tiktoken.get_encoding(model)
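Token counting goes through tiktoken, falling back to the cl100k_base encoding when the model name is unknown. A standalone sketch of the same lookup:

import tiktoken

try:
    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
except KeyError:
    enc = tiktoken.get_encoding("cl100k_base")  # same fallback as the warning branch above
num_tokens = len(enc.encode("example prompt text"))
print(num_tokens)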
@@ -855,14 +683,9 @@ class BaseOpenAI(BaseLLM):
if context_size is None:
raise ValueError(
<<<<<<< HEAD
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
=======
f"Unknown model: {modelname}. Please provide a valid"
" OpenAI model name.Known models are: "
+ ", ".join(model_token_mapping.keys())
>>>>>>> master
)
return context_size
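modelname_to_contextsize feeds the max_tokens computation: when max_tokens is -1, the remaining budget is the model's context window minus the prompt's token count. In rough arithmetic, with hypothetical numbers:

context_size = 4096    # hypothetical entry from model_token_mapping
prompt_tokens = 250    # hypothetical get_num_tokens(prompt) result
max_tokens = context_size - prompt_tokens
print(max_tokens)      # 3846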
@@ -908,14 +731,10 @@ class OpenAI(BaseOpenAI):
@property
def _invocation_params(self) -> Dict[str, Any]:
<<<<<<< HEAD
return {**{"model": self.model_name}, **super()._invocation_params}
=======
return {
**{"model": self.model_name},
**super()._invocation_params,
}
>>>>>>> master
class AzureOpenAI(BaseOpenAI):
@@ -989,8 +808,6 @@ class OpenAIChat(BaseLLM):
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
<<<<<<< HEAD
=======
Args:
model_name: The model name to use.
@@ -1006,7 +823,6 @@ class OpenAIChat(BaseLLM):
>>>>>>> master
Example:
.. code-block:: python
@@ -1015,20 +831,10 @@ class OpenAIChat(BaseLLM):
"""
client: Any #: :meta private:
<<<<<<< HEAD
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
# to support explicit proxy for OpenAI
=======
model_name: str = "gpt-3.5-turbo-1106"
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
>>>>>>> master
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
@@ -1044,25 +850,17 @@ class OpenAIChat(BaseLLM):
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
<<<<<<< HEAD
all_required_field_names = {field.alias for field in cls.__fields__.values()}
=======
all_required_field_names = {
field.alias for field in cls.__fields__.values()
}
>>>>>>> master
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
<<<<<<< HEAD
raise ValueError(f"Found {field_name} supplied twice.")
=======
raise ValueError(
f"Found {field_name} supplied twice."
)
>>>>>>> master
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
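build_extra, in both variants, moves any keyword that is not a declared field into model_kwargs and rejects duplicates. The same bookkeeping stripped of pydantic, with stand-in field names:

declared = {"model_name", "openai_api_key", "model_kwargs"}   # stand-ins for the pydantic field aliases
values = {"model_name": "gpt-3.5-turbo", "temperature": 0.2, "top_p": 0.9}
extra = values.setdefault("model_kwargs", {})
for name in [k for k in list(values) if k not in declared]:
    if name in extra:
        raise ValueError(f"Found {name} supplied twice.")
    extra[name] = values.pop(name)
print(values)  # {'model_name': 'gpt-3.5-turbo', 'model_kwargs': {'temperature': 0.2, 'top_p': 0.9}}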
@@ -1086,25 +884,14 @@ class OpenAIChat(BaseLLM):
default="",
)
openai_organization = get_from_dict_or_env(
<<<<<<< HEAD
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
=======
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
>>>>>>> master
)
try:
import openai
<<<<<<< HEAD
if openai_api_base:
raise Exception("The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(api_base=openai_api_base)'")
if openai_organization:
raise Exception("The 'openai.organization' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(organization=openai_organization)'")
=======
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
@@ -1112,7 +899,6 @@ class OpenAIChat(BaseLLM):
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
>>>>>>> master
except ImportError:
raise ImportError(
"Could not import openai python package. "
@@ -1122,16 +908,10 @@ class OpenAIChat(BaseLLM):
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
<<<<<<< HEAD
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
=======
"`openai` has no `ChatCompletion` attribute, this is"
" likely due to an old version of the openai package."
" Try upgrading it with `pip install --upgrade"
" openai`."
>>>>>>> master
)
return values
@@ -1145,15 +925,6 @@ class OpenAIChat(BaseLLM):
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
<<<<<<< HEAD
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
=======
"OpenAIChat currently only supports single prompt,"
f" got {prompts}"
)
@@ -1170,7 +941,6 @@ class OpenAIChat(BaseLLM):
"`stop` found in both the input and default"
" params."
)
>>>>>>> master
params["stop"] = stop
if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
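The chat path turns the single prompt into a chat-completion payload: the configured prefix_messages plus one user message, merged with the default params. Roughly, with a hypothetical prefix and stop value:

prefix_messages = [{"role": "system", "content": "You are a helpful assistant."}]  # hypothetical
prompt = "Summarize the release notes."
messages = prefix_messages + [{"role": "user", "content": prompt}]
params = {"model": "gpt-3.5-turbo", "stop": ["\n\n"]}  # stop shown only to illustrate the merge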
@@ -1189,13 +959,9 @@ class OpenAIChat(BaseLLM):
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
<<<<<<< HEAD
token = stream_resp["choices"][0]["delta"].get("content", "")
=======
token = stream_resp["choices"][0]["delta"].get(
"content", ""
)
>>>>>>> master
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
@@ -1213,13 +979,9 @@ class OpenAIChat(BaseLLM):
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
<<<<<<< HEAD
token = stream_resp["choices"][0]["delta"].get("content", "")
=======
token = stream_resp["choices"][0]["delta"].get(
"content", ""
)
>>>>>>> master
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
@@ -1234,13 +996,9 @@ class OpenAIChat(BaseLLM):
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
<<<<<<< HEAD
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
=======
for chunk in self._stream(
prompts[0], stop, run_manager, **kwargs
):
>>>>>>> master
if generation is None:
generation = chunk
else:
@@ -1259,9 +1017,6 @@ class OpenAIChat(BaseLLM):
}
return LLMResult(
generations=[
<<<<<<< HEAD
[Generation(text=full_response["choices"][0]["message"]["content"])]
=======
[
Generation(
text=full_response["choices"][0]["message"][
@@ -1269,7 +1024,6 @@ class OpenAIChat(BaseLLM):
]
)
]
>>>>>>> master
],
llm_output=llm_output,
)
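In the non-streaming chat path, the first choice's message content becomes a single Generation and the accumulated token usage goes into llm_output. With illustrative data:

full_response = {
    "choices": [{"message": {"role": "assistant", "content": "Hello!"}}],
    "usage": {"prompt_tokens": 5, "completion_tokens": 2, "total_tokens": 7},
}
text = full_response["choices"][0]["message"]["content"]
llm_output = {"token_usage": full_response["usage"], "model_name": "gpt-3.5-turbo"}
print(text, llm_output["token_usage"]["total_tokens"])  # Hello! 7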
@@ -1283,13 +1037,9 @@ class OpenAIChat(BaseLLM):
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
<<<<<<< HEAD
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
=======
async for chunk in self._astream(
prompts[0], stop, run_manager, **kwargs
):
>>>>>>> master
if generation is None:
generation = chunk
else:
@@ -1308,9 +1058,6 @@ class OpenAIChat(BaseLLM):
}
return LLMResult(
generations=[
<<<<<<< HEAD
[Generation(text=full_response["choices"][0]["message"]["content"])]
=======
[
Generation(
text=full_response["choices"][0]["message"][
@@ -1318,7 +1065,6 @@ class OpenAIChat(BaseLLM):
]
)
]
>>>>>>> master
],
llm_output=llm_output,
)
@@ -1326,14 +1072,10 @@ class OpenAIChat(BaseLLM):
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
<<<<<<< HEAD
return {**{"model_name": self.model_name}, **self._default_params}
=======
return {
**{"model_name": self.model_name},
**self._default_params,
}
>>>>>>> master
@property
def _llm_type(self) -> str:
@@ -1349,15 +1091,9 @@ class OpenAIChat(BaseLLM):
import tiktoken
except ImportError:
raise ImportError(
<<<<<<< HEAD
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
=======
"Could not import tiktoken python package. This is"
" needed in order to calculate get_num_tokens. Please"
" install it with `pip install tiktoken`."
>>>>>>> master
)
enc = tiktoken.encoding_for_model(self.model_name)
