@@ -42,7 +42,9 @@ def test_cohere_async_api_error_handling(cohere_instance):
     cohere_instance.model = "base"
     cohere_instance.cohere_api_key = "invalid-api-key"
     with pytest.raises(Exception):
-        cohere_instance.async_call("Error handling with invalid API key.")
+        cohere_instance.async_call(
+            "Error handling with invalid API key."
+        )
 
 
 def test_cohere_stream_api_error_handling(cohere_instance):
@@ -51,7 +53,8 @@ def test_cohere_stream_api_error_handling(cohere_instance):
     cohere_instance.cohere_api_key = "invalid-api-key"
     with pytest.raises(Exception):
         generator = cohere_instance.stream(
-            "Error handling with invalid API key.")
+            "Error handling with invalid API key."
+        )
         for token in generator:
            pass
 
@@ -91,26 +94,31 @@ def test_cohere_convert_prompt(cohere_instance):
 
 
 def test_cohere_call_with_stop(cohere_instance):
-    response = cohere_instance("Translate to French.", stop=["stop1", "stop2"])
+    response = cohere_instance(
+        "Translate to French.", stop=["stop1", "stop2"]
+    )
     assert response == "Mocked Response from Cohere"
 
 
 def test_cohere_stream_with_stop(cohere_instance):
-    generator = cohere_instance.stream("Write a story.",
-                                       stop=["stop1", "stop2"])
+    generator = cohere_instance.stream(
+        "Write a story.", stop=["stop1", "stop2"]
+    )
     for token in generator:
         assert isinstance(token, str)
 
 
 def test_cohere_async_call_with_stop(cohere_instance):
-    response = cohere_instance.async_call("Tell me a joke.",
-                                          stop=["stop1", "stop2"])
+    response = cohere_instance.async_call(
+        "Tell me a joke.", stop=["stop1", "stop2"]
+    )
     assert response == "Mocked Response from Cohere"
 
 
 def test_cohere_async_stream_with_stop(cohere_instance):
-    async_generator = cohere_instance.async_stream("Translate to French.",
-                                                   stop=["stop1", "stop2"])
+    async_generator = cohere_instance.async_stream(
+        "Translate to French.", stop=["stop1", "stop2"]
+    )
     for token in async_generator:
         assert isinstance(token, str)
 
@@ -166,8 +174,12 @@ def test_base_cohere_validate_environment_without_cohere():
 # Test cases for benchmarking generations with various models
 def test_cohere_generate_with_command_light(cohere_instance):
     cohere_instance.model = "command-light"
-    response = cohere_instance("Generate text with Command Light model.")
-    assert response.startswith("Generated text with Command Light model")
+    response = cohere_instance(
+        "Generate text with Command Light model."
+    )
+    assert response.startswith(
+        "Generated text with Command Light model"
+    )
 
 
 def test_cohere_generate_with_command(cohere_instance):
@@ -190,54 +202,74 @@ def test_cohere_generate_with_base(cohere_instance):
 
 def test_cohere_generate_with_embed_english_v2(cohere_instance):
     cohere_instance.model = "embed-english-v2.0"
-    response = cohere_instance("Generate embeddings with English v2.0 model.")
-    assert response.startswith("Generated embeddings with English v2.0 model")
+    response = cohere_instance(
+        "Generate embeddings with English v2.0 model."
+    )
+    assert response.startswith(
+        "Generated embeddings with English v2.0 model"
+    )
 
 
 def test_cohere_generate_with_embed_english_light_v2(cohere_instance):
     cohere_instance.model = "embed-english-light-v2.0"
     response = cohere_instance(
-        "Generate embeddings with English Light v2.0 model.")
+        "Generate embeddings with English Light v2.0 model."
+    )
     assert response.startswith(
-        "Generated embeddings with English Light v2.0 model")
+        "Generated embeddings with English Light v2.0 model"
+    )
 
 
 def test_cohere_generate_with_embed_multilingual_v2(cohere_instance):
     cohere_instance.model = "embed-multilingual-v2.0"
     response = cohere_instance(
-        "Generate embeddings with Multilingual v2.0 model.")
+        "Generate embeddings with Multilingual v2.0 model."
+    )
     assert response.startswith(
-        "Generated embeddings with Multilingual v2.0 model")
+        "Generated embeddings with Multilingual v2.0 model"
+    )
 
 
 def test_cohere_generate_with_embed_english_v3(cohere_instance):
     cohere_instance.model = "embed-english-v3.0"
-    response = cohere_instance("Generate embeddings with English v3.0 model.")
-    assert response.startswith("Generated embeddings with English v3.0 model")
+    response = cohere_instance(
+        "Generate embeddings with English v3.0 model."
+    )
+    assert response.startswith(
+        "Generated embeddings with English v3.0 model"
+    )
 
 
 def test_cohere_generate_with_embed_english_light_v3(cohere_instance):
     cohere_instance.model = "embed-english-light-v3.0"
     response = cohere_instance(
-        "Generate embeddings with English Light v3.0 model.")
+        "Generate embeddings with English Light v3.0 model."
+    )
     assert response.startswith(
-        "Generated embeddings with English Light v3.0 model")
+        "Generated embeddings with English Light v3.0 model"
+    )
 
 
 def test_cohere_generate_with_embed_multilingual_v3(cohere_instance):
     cohere_instance.model = "embed-multilingual-v3.0"
     response = cohere_instance(
-        "Generate embeddings with Multilingual v3.0 model.")
+        "Generate embeddings with Multilingual v3.0 model."
+    )
     assert response.startswith(
-        "Generated embeddings with Multilingual v3.0 model")
+        "Generated embeddings with Multilingual v3.0 model"
+    )
 
 
-def test_cohere_generate_with_embed_multilingual_light_v3(cohere_instance,):
+def test_cohere_generate_with_embed_multilingual_light_v3(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-light-v3.0"
     response = cohere_instance(
-        "Generate embeddings with Multilingual Light v3.0 model.")
+        "Generate embeddings with Multilingual Light v3.0 model."
+    )
     assert response.startswith(
-        "Generated embeddings with Multilingual Light v3.0 model")
+        "Generated embeddings with Multilingual Light v3.0 model"
+    )
 
 
 # Add more test cases to benchmark other models and functionalities
@@ -267,13 +299,17 @@ def test_cohere_call_with_embed_english_v3_model(cohere_instance):
     assert isinstance(response, str)
 
 
-def test_cohere_call_with_embed_multilingual_v2_model(cohere_instance,):
+def test_cohere_call_with_embed_multilingual_v2_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v2.0"
     response = cohere_instance("Translate to French.")
     assert isinstance(response, str)
 
 
-def test_cohere_call_with_embed_multilingual_v3_model(cohere_instance,):
+def test_cohere_call_with_embed_multilingual_v3_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v3.0"
     response = cohere_instance("Translate to French.")
     assert isinstance(response, str)
@@ -293,7 +329,9 @@ def test_cohere_call_with_long_prompt(cohere_instance):
 
 
 def test_cohere_call_with_max_tokens_limit_exceeded(cohere_instance):
     cohere_instance.max_tokens = 10
-    prompt = ("This is a test prompt that will exceed the max tokens limit.")
+    prompt = (
+        "This is a test prompt that will exceed the max tokens limit."
+    )
     with pytest.raises(ValueError):
         cohere_instance(prompt)
@@ -326,14 +364,18 @@ def test_cohere_stream_with_embed_english_v3_model(cohere_instance):
         assert isinstance(token, str)
 
 
-def test_cohere_stream_with_embed_multilingual_v2_model(cohere_instance,):
+def test_cohere_stream_with_embed_multilingual_v2_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v2.0"
     generator = cohere_instance.stream("Write a story.")
     for token in generator:
         assert isinstance(token, str)
 
 
-def test_cohere_stream_with_embed_multilingual_v3_model(cohere_instance,):
+def test_cohere_stream_with_embed_multilingual_v3_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v3.0"
     generator = cohere_instance.stream("Write a story.")
     for token in generator:
@@ -352,25 +394,33 @@ def test_cohere_async_call_with_base_model(cohere_instance):
     assert isinstance(response, str)
 
 
-def test_cohere_async_call_with_embed_english_v2_model(cohere_instance,):
+def test_cohere_async_call_with_embed_english_v2_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-english-v2.0"
     response = cohere_instance.async_call("Translate to French.")
     assert isinstance(response, str)
 
 
-def test_cohere_async_call_with_embed_english_v3_model(cohere_instance,):
+def test_cohere_async_call_with_embed_english_v3_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-english-v3.0"
     response = cohere_instance.async_call("Translate to French.")
     assert isinstance(response, str)
 
 
-def test_cohere_async_call_with_embed_multilingual_v2_model(cohere_instance,):
+def test_cohere_async_call_with_embed_multilingual_v2_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v2.0"
     response = cohere_instance.async_call("Translate to French.")
     assert isinstance(response, str)
 
 
-def test_cohere_async_call_with_embed_multilingual_v3_model(cohere_instance,):
+def test_cohere_async_call_with_embed_multilingual_v3_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v3.0"
     response = cohere_instance.async_call("Translate to French.")
     assert isinstance(response, str)
@@ -390,28 +440,36 @@ def test_cohere_async_stream_with_base_model(cohere_instance):
         assert isinstance(token, str)
 
 
-def test_cohere_async_stream_with_embed_english_v2_model(cohere_instance,):
+def test_cohere_async_stream_with_embed_english_v2_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-english-v2.0"
     async_generator = cohere_instance.async_stream("Write a story.")
     for token in async_generator:
         assert isinstance(token, str)
 
 
-def test_cohere_async_stream_with_embed_english_v3_model(cohere_instance,):
+def test_cohere_async_stream_with_embed_english_v3_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-english-v3.0"
     async_generator = cohere_instance.async_stream("Write a story.")
     for token in async_generator:
         assert isinstance(token, str)
 
 
-def test_cohere_async_stream_with_embed_multilingual_v2_model(cohere_instance,):
+def test_cohere_async_stream_with_embed_multilingual_v2_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v2.0"
     async_generator = cohere_instance.async_stream("Write a story.")
     for token in async_generator:
         assert isinstance(token, str)
 
 
-def test_cohere_async_stream_with_embed_multilingual_v3_model(cohere_instance,):
+def test_cohere_async_stream_with_embed_multilingual_v3_model(
+    cohere_instance,
+):
     cohere_instance.model = "embed-multilingual-v3.0"
     async_generator = cohere_instance.async_stream("Write a story.")
     for token in async_generator:
@@ -421,7 +479,9 @@ def test_cohere_async_stream_with_embed_multilingual_v3_model(cohere_instance,):
 def test_cohere_representation_model_embedding(cohere_instance):
     # Test using the Representation model for text embedding
     cohere_instance.model = "embed-english-v3.0"
-    embedding = cohere_instance.embed("Generate an embedding for this text.")
+    embedding = cohere_instance.embed(
+        "Generate an embedding for this text."
+    )
     assert isinstance(embedding, list)
     assert len(embedding) > 0
 
@@ -435,20 +495,26 @@ def test_cohere_representation_model_classification(cohere_instance):
     assert "score" in classification
 
 
-def test_cohere_representation_model_language_detection(cohere_instance,):
+def test_cohere_representation_model_language_detection(
+    cohere_instance,
+):
     # Test using the Representation model for language detection
     cohere_instance.model = "embed-english-v3.0"
     language = cohere_instance.detect_language(
-        "Detect the language of this text.")
+        "Detect the language of this text."
+    )
     assert isinstance(language, str)
 
 
 def test_cohere_representation_model_max_tokens_limit_exceeded(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test handling max tokens limit exceeded error
     cohere_instance.model = "embed-english-v3.0"
     cohere_instance.max_tokens = 10
-    prompt = ("This is a test prompt that will exceed the max tokens limit.")
+    prompt = (
+        "This is a test prompt that will exceed the max tokens limit."
+    )
     with pytest.raises(ValueError):
         cohere_instance.embed(prompt)
@@ -456,80 +522,102 @@ def test_cohere_representation_model_max_tokens_limit_exceeded(
 # Add more production-grade test cases based on real-world scenarios
 
 
-def test_cohere_representation_model_multilingual_embedding(cohere_instance,):
+def test_cohere_representation_model_multilingual_embedding(
+    cohere_instance,
+):
     # Test using the Representation model for multilingual text embedding
     cohere_instance.model = "embed-multilingual-v3.0"
-    embedding = cohere_instance.embed("Generate multilingual embeddings.")
+    embedding = cohere_instance.embed(
+        "Generate multilingual embeddings."
+    )
     assert isinstance(embedding, list)
     assert len(embedding) > 0
 
 
 def test_cohere_representation_model_multilingual_classification(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for multilingual text classification
     cohere_instance.model = "embed-multilingual-v3.0"
-    classification = cohere_instance.classify("Classify multilingual text.")
+    classification = cohere_instance.classify(
+        "Classify multilingual text."
+    )
     assert isinstance(classification, dict)
     assert "class" in classification
     assert "score" in classification
 
 
 def test_cohere_representation_model_multilingual_language_detection(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for multilingual language detection
     cohere_instance.model = "embed-multilingual-v3.0"
     language = cohere_instance.detect_language(
-        "Detect the language of multilingual text.")
+        "Detect the language of multilingual text."
+    )
     assert isinstance(language, str)
 
 
 def test_cohere_representation_model_multilingual_max_tokens_limit_exceeded(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test handling max tokens limit exceeded error for multilingual model
     cohere_instance.model = "embed-multilingual-v3.0"
     cohere_instance.max_tokens = 10
-    prompt = ("This is a test prompt that will exceed the max tokens limit"
-              " for multilingual model.")
+    prompt = (
+        "This is a test prompt that will exceed the max tokens limit"
+        " for multilingual model."
+    )
     with pytest.raises(ValueError):
         cohere_instance.embed(prompt)
 
 
 def test_cohere_representation_model_multilingual_light_embedding(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for multilingual light text embedding
     cohere_instance.model = "embed-multilingual-light-v3.0"
-    embedding = cohere_instance.embed("Generate multilingual light embeddings.")
+    embedding = cohere_instance.embed(
+        "Generate multilingual light embeddings."
+    )
     assert isinstance(embedding, list)
     assert len(embedding) > 0
 
 
 def test_cohere_representation_model_multilingual_light_classification(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for multilingual light text classification
     cohere_instance.model = "embed-multilingual-light-v3.0"
     classification = cohere_instance.classify(
-        "Classify multilingual light text.")
+        "Classify multilingual light text."
+    )
     assert isinstance(classification, dict)
     assert "class" in classification
     assert "score" in classification
 
 
 def test_cohere_representation_model_multilingual_light_language_detection(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for multilingual light language detection
     cohere_instance.model = "embed-multilingual-light-v3.0"
     language = cohere_instance.detect_language(
-        "Detect the language of multilingual light text.")
+        "Detect the language of multilingual light text."
+    )
     assert isinstance(language, str)
 
 
 def test_cohere_representation_model_multilingual_light_max_tokens_limit_exceeded(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test handling max tokens limit exceeded error for multilingual light model
     cohere_instance.model = "embed-multilingual-light-v3.0"
     cohere_instance.max_tokens = 10
-    prompt = ("This is a test prompt that will exceed the max tokens limit"
-              " for multilingual light model.")
+    prompt = (
+        "This is a test prompt that will exceed the max tokens limit"
+        " for multilingual light model."
+    )
     with pytest.raises(ValueError):
         cohere_instance.embed(prompt)
 
@@ -537,14 +625,18 @@ def test_cohere_representation_model_multilingual_light_max_tokens_limit_exceeded(
 def test_cohere_command_light_model(cohere_instance):
     # Test using the Command Light model for text generation
     cohere_instance.model = "command-light"
-    response = cohere_instance("Generate text using Command Light model.")
+    response = cohere_instance(
+        "Generate text using Command Light model."
+    )
     assert isinstance(response, str)
 
 
 def test_cohere_base_light_model(cohere_instance):
     # Test using the Base Light model for text generation
     cohere_instance.model = "base-light"
-    response = cohere_instance("Generate text using Base Light model.")
+    response = cohere_instance(
+        "Generate text using Base Light model."
+    )
     assert isinstance(response, str)
 
 
@@ -555,7 +647,9 @@ def test_cohere_generate_summarize_endpoint(cohere_instance):
     assert isinstance(response, str)
 
 
-def test_cohere_representation_model_english_embedding(cohere_instance,):
+def test_cohere_representation_model_english_embedding(
+    cohere_instance,
+):
     # Test using the Representation model for English text embedding
     cohere_instance.model = "embed-english-v3.0"
     embedding = cohere_instance.embed("Generate English embeddings.")
@@ -563,69 +657,90 @@ def test_cohere_representation_model_english_embedding(cohere_instance,):
     assert len(embedding) > 0
 
 
-def test_cohere_representation_model_english_classification(cohere_instance,):
+def test_cohere_representation_model_english_classification(
+    cohere_instance,
+):
     # Test using the Representation model for English text classification
     cohere_instance.model = "embed-english-v3.0"
-    classification = cohere_instance.classify("Classify English text.")
+    classification = cohere_instance.classify(
+        "Classify English text."
+    )
     assert isinstance(classification, dict)
     assert "class" in classification
     assert "score" in classification
 
 
 def test_cohere_representation_model_english_language_detection(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for English language detection
     cohere_instance.model = "embed-english-v3.0"
     language = cohere_instance.detect_language(
-        "Detect the language of English text.")
+        "Detect the language of English text."
+    )
     assert isinstance(language, str)
 
 
 def test_cohere_representation_model_english_max_tokens_limit_exceeded(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test handling max tokens limit exceeded error for English model
     cohere_instance.model = "embed-english-v3.0"
     cohere_instance.max_tokens = 10
-    prompt = ("This is a test prompt that will exceed the max tokens limit"
-              " for English model.")
+    prompt = (
+        "This is a test prompt that will exceed the max tokens limit"
+        " for English model."
+    )
     with pytest.raises(ValueError):
         cohere_instance.embed(prompt)
 
 
-def test_cohere_representation_model_english_light_embedding(cohere_instance,):
+def test_cohere_representation_model_english_light_embedding(
+    cohere_instance,
+):
     # Test using the Representation model for English light text embedding
     cohere_instance.model = "embed-english-light-v3.0"
-    embedding = cohere_instance.embed("Generate English light embeddings.")
+    embedding = cohere_instance.embed(
+        "Generate English light embeddings."
+    )
     assert isinstance(embedding, list)
     assert len(embedding) > 0
 
 
 def test_cohere_representation_model_english_light_classification(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for English light text classification
     cohere_instance.model = "embed-english-light-v3.0"
-    classification = cohere_instance.classify("Classify English light text.")
+    classification = cohere_instance.classify(
+        "Classify English light text."
+    )
     assert isinstance(classification, dict)
     assert "class" in classification
     assert "score" in classification
 
 
 def test_cohere_representation_model_english_light_language_detection(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test using the Representation model for English light language detection
     cohere_instance.model = "embed-english-light-v3.0"
     language = cohere_instance.detect_language(
-        "Detect the language of English light text.")
+        "Detect the language of English light text."
+    )
     assert isinstance(language, str)
 
 
 def test_cohere_representation_model_english_light_max_tokens_limit_exceeded(
-    cohere_instance,):
+    cohere_instance,
+):
     # Test handling max tokens limit exceeded error for English light model
     cohere_instance.model = "embed-english-light-v3.0"
     cohere_instance.max_tokens = 10
-    prompt = ("This is a test prompt that will exceed the max tokens limit"
-              " for English light model.")
+    prompt = (
+        "This is a test prompt that will exceed the max tokens limit"
+        " for English light model."
+    )
     with pytest.raises(ValueError):
         cohere_instance.embed(prompt)
 
@@ -633,7 +748,9 @@ def test_cohere_representation_model_english_light_max_tokens_limit_exceeded(
 def test_cohere_command_model(cohere_instance):
     # Test using the Command model for text generation
     cohere_instance.model = "command"
-    response = cohere_instance("Generate text using the Command model.")
+    response = cohere_instance(
+        "Generate text using the Command model."
+    )
     assert isinstance(response, str)
 
 
@@ -647,7 +764,9 @@ def test_cohere_invalid_model(cohere_instance):
         cohere_instance("Generate text using an invalid model.")
 
 
-def test_cohere_base_model_generation_with_max_tokens(cohere_instance,):
+def test_cohere_base_model_generation_with_max_tokens(
+    cohere_instance,
+):
     # Test generating text using the base model with a specified max_tokens limit
     cohere_instance.model = "base"
     cohere_instance.max_tokens = 20