flake8 cleanup

pull/380/head
evelynmitchell 1 year ago
parent d95ee67fbf
commit 1ed5b1387c

@@ -386,7 +386,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
) )
for j in range(0, len(token), self.embedding_ctx_length): for j in range(0, len(token), self.embedding_ctx_length):
tokens.append( tokens.append(
token[j : j + self.embedding_ctx_length] token[j: j + self.embedding_ctx_length]
) )
indices.append(i) indices.append(i)
@@ -406,7 +406,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in _iter: for i in _iter:
response = embed_with_retry( response = embed_with_retry(
self, self,
input=tokens[i : i + _chunk_size], input=tokens[i: i + _chunk_size],
**self._invocation_params, **self._invocation_params,
) )
batched_embeddings.extend( batched_embeddings.extend(
@@ -486,7 +486,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
) )
for j in range(0, len(token), self.embedding_ctx_length): for j in range(0, len(token), self.embedding_ctx_length):
tokens.append( tokens.append(
token[j : j + self.embedding_ctx_length] token[j: j + self.embedding_ctx_length]
) )
indices.append(i) indices.append(i)
@@ -495,7 +495,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in range(0, len(tokens), _chunk_size): for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry( response = await async_embed_with_retry(
self, self,
input=tokens[i : i + _chunk_size], input=tokens[i: i + _chunk_size],
**self._invocation_params, **self._invocation_params,
) )
batched_embeddings.extend( batched_embeddings.extend(

@@ -41,15 +41,9 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
multiplier=multiplier, min=min_seconds, max=max_seconds multiplier=multiplier, min=min_seconds, max=max_seconds
), ),
retry=( retry=(
retry_if_exception_type( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
google.api_core.exceptions.ResourceExhausted | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
| retry_if_exception_type(
google.api_core.exceptions.ServiceUnavailable
)
| retry_if_exception_type(
google.api_core.exceptions.GoogleAPIError
)
), ),
before_sleep=before_sleep_log(logger, logging.WARNING), before_sleep=before_sleep_log(logger, logging.WARNING),
) )
@@ -123,30 +117,17 @@ class GooglePalm(BaseLLM, BaseModel):
values["client"] = genai values["client"] = genai
if ( if (values["temperature"] is not None and not 0 <= values["temperature"] <= 1):
values["temperature"] is not None raise ValueError("temperature must be in the range [0.0, 1.0]")
and not 0 <= values["temperature"] <= 1
):
raise ValueError(
"temperature must be in the range [0.0, 1.0]"
)
if ( if (values["top_p"] is not None and not 0 <= values["top_p"] <= 1):
values["top_p"] is not None
and not 0 <= values["top_p"] <= 1
):
raise ValueError("top_p must be in the range [0.0, 1.0]") raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0: if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive") raise ValueError("top_k must be positive")
if ( if (values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0):
values["max_output_tokens"] is not None raise ValueError("max_output_tokens must be greater than zero")
and values["max_output_tokens"] <= 0
):
raise ValueError(
"max_output_tokens must be greater than zero"
)
return values return values

Loading…
Cancel
Save