feat: update openai tests to work with both clients (#2442)

https://github.com/meta-llama/llama-stack-client-python/pull/238 updated
llama-stack-client to also support OpenAI endpoints for embeddings,
files, and vector stores. This updates the tests to exercise all client
configurations: the OpenAI SDK, the llama-stack-client SDK, and
library-as-client.
Hardik Shah 2025-06-12 16:30:23 -07:00 committed by GitHub
parent 0bc1747ed8
commit fef670b024
3 changed files with 30 additions and 34 deletions
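
The heart of the change is a parametrized compat_client fixture that runs every embedding test against both the OpenAI SDK and the llama-stack-client SDK. A minimal sketch of the pattern, assuming pytest and a client_with_models fixture as in the diff below (the import path and the smoke test are illustrative, not part of the diff):

import pytest

from llama_stack import LlamaStackAsLibraryClient  # import path assumed


@pytest.fixture(params=["openai_client", "llama_stack_client"])
def compat_client(request, client_with_models):
    # The OpenAI SDK talks HTTP, so it cannot target an in-process
    # library client; skip that combination.
    if request.param == "openai_client" and isinstance(client_with_models, LlamaStackAsLibraryClient):
        pytest.skip("OpenAI client tests not supported with library client")
    # Hand back whichever client fixture this run was parametrized with.
    return request.getfixturevalue(request.param)


def test_embeddings_smoke(compat_client, embedding_model_id):
    # One test body, executed once per client parameter.
    response = compat_client.embeddings.create(model=embedding_model_id, input="Hello, world!")
    assert len(response.data[0].embedding) > 0

pytest then reports each case separately, e.g. test_embeddings_smoke[openai_client] and test_embeddings_smoke[llama_stack_client], so a single test body covers both clients.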


@@ -109,11 +109,6 @@ class OpenAIVectorStoreMixin(ABC):
             provider_id=provider_id,
             provider_resource_id=provider_vector_db_id,
         )
-        from rich.pretty import pprint
-
-        print("VECTOR DB")
-        pprint(vector_db)
-
         # Register the vector DB
         await self.register_vector_db(vector_db)


@@ -34,11 +34,15 @@ def skip_if_model_doesnt_support_variable_dimensions(model_id):
         pytest.skip("{model_id} does not support variable output embedding dimensions")


-def skip_if_model_doesnt_support_openai_embeddings(client_with_models, model_id):
-    if isinstance(client_with_models, LlamaStackAsLibraryClient):
-        pytest.skip("OpenAI embeddings are not supported when testing with library client yet.")
+@pytest.fixture(params=["openai_client", "llama_stack_client"])
+def compat_client(request, client_with_models):
+    if request.param == "openai_client" and isinstance(client_with_models, LlamaStackAsLibraryClient):
+        pytest.skip("OpenAI client tests not supported with library client")
+    return request.getfixturevalue(request.param)

-    provider = provider_from_model(client_with_models, model_id)
+
+def skip_if_model_doesnt_support_openai_embeddings(client, model_id):
+    provider = provider_from_model(client, model_id)
     if provider.provider_type in (
         "inline::meta-reference",
         "remote::bedrock",
@@ -58,13 +62,13 @@ def openai_client(client_with_models):
     return OpenAI(base_url=base_url, api_key="fake")


-def test_openai_embeddings_single_string(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_single_string(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with a single string input."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     input_text = "Hello, world!"

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text,
         encoding_format="float",
@@ -80,13 +84,13 @@ def test_openai_embeddings_single_string(openai_client, client_with_models, embedding_model_id):
     assert all(isinstance(x, float) for x in response.data[0].embedding)


-def test_openai_embeddings_multiple_strings(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_multiple_strings(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with multiple string inputs."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     input_texts = ["Hello, world!", "How are you today?", "This is a test."]

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_texts,
     )
@@ -103,13 +107,13 @@ def test_openai_embeddings_multiple_strings(openai_client, client_with_models, embedding_model_id):
     assert all(isinstance(x, float) for x in embedding_data.embedding)


-def test_openai_embeddings_with_encoding_format_float(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_with_encoding_format_float(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with float encoding format."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     input_text = "Test encoding format"

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text,
         encoding_format="float",
@@ -121,7 +125,7 @@ def test_openai_embeddings_with_encoding_format_float(openai_client, client_with_models, embedding_model_id):
     assert all(isinstance(x, float) for x in response.data[0].embedding)


-def test_openai_embeddings_with_dimensions(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_with_dimensions(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with custom dimensions parameter."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)
     skip_if_model_doesnt_support_variable_dimensions(embedding_model_id)
@@ -129,7 +133,7 @@ def test_openai_embeddings_with_dimensions(openai_client, client_with_models, embedding_model_id):
     input_text = "Test dimensions parameter"
     dimensions = 16

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text,
         dimensions=dimensions,
@@ -142,14 +146,14 @@ def test_openai_embeddings_with_dimensions(openai_client, client_with_models, embedding_model_id):
     assert len(response.data[0].embedding) > 0


-def test_openai_embeddings_with_user_parameter(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_with_user_parameter(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with user parameter."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     input_text = "Test user parameter"
     user_id = "test-user-123"

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text,
         user=user_id,
@@ -161,41 +165,41 @@ def test_openai_embeddings_with_user_parameter(openai_client, client_with_models, embedding_model_id):
     assert len(response.data[0].embedding) > 0


-def test_openai_embeddings_empty_list_error(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_empty_list_error(compat_client, client_with_models, embedding_model_id):
     """Test that empty list input raises an appropriate error."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     with pytest.raises(Exception):  # noqa: B017
-        openai_client.embeddings.create(
+        compat_client.embeddings.create(
             model=embedding_model_id,
             input=[],
         )


-def test_openai_embeddings_invalid_model_error(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_invalid_model_error(compat_client, client_with_models, embedding_model_id):
     """Test that invalid model ID raises an appropriate error."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     with pytest.raises(Exception):  # noqa: B017
-        openai_client.embeddings.create(
+        compat_client.embeddings.create(
             model="invalid-model-id",
             input="Test text",
         )


-def test_openai_embeddings_different_inputs_different_outputs(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_different_inputs_different_outputs(compat_client, client_with_models, embedding_model_id):
     """Test that different inputs produce different embeddings."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     input_text1 = "This is the first text"
     input_text2 = "This is completely different content"

-    response1 = openai_client.embeddings.create(
+    response1 = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text1,
     )

-    response2 = openai_client.embeddings.create(
+    response2 = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text2,
     )
@@ -208,7 +212,7 @@ def test_openai_embeddings_different_inputs_different_outputs(openai_client, client_with_models, embedding_model_id):
     assert embedding1 != embedding2


-def test_openai_embeddings_with_encoding_format_base64(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_with_encoding_format_base64(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with base64 encoding format."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)
     skip_if_model_doesnt_support_variable_dimensions(embedding_model_id)
@@ -216,7 +220,7 @@ def test_openai_embeddings_with_encoding_format_base64(openai_client, client_with_models, embedding_model_id):
     input_text = "Test base64 encoding format"
     dimensions = 12

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_text,
         encoding_format="base64",
@@ -241,13 +245,13 @@ def test_openai_embeddings_with_encoding_format_base64(openai_client, client_with_models, embedding_model_id):
     assert all(isinstance(x, float) for x in embedding_floats)


-def test_openai_embeddings_base64_batch_processing(openai_client, client_with_models, embedding_model_id):
+def test_openai_embeddings_base64_batch_processing(compat_client, client_with_models, embedding_model_id):
     """Test OpenAI embeddings endpoint with base64 encoding for batch processing."""
     skip_if_model_doesnt_support_openai_embeddings(client_with_models, embedding_model_id)

     input_texts = ["First text for base64", "Second text for base64", "Third text for base64"]

-    response = openai_client.embeddings.create(
+    response = compat_client.embeddings.create(
         model=embedding_model_id,
         input=input_texts,
         encoding_format="base64",
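
The base64 assertions above (the embedding_floats check) rely on how OpenAI-compatible servers encode embeddings when encoding_format="base64" is requested: the data field is a base64 string wrapping a little-endian float32 array. A minimal decode sketch (helper name is illustrative, not part of the diff):

import base64
import struct


def decode_base64_embedding(b64_data: str) -> list[float]:
    # base64 string -> raw bytes -> little-endian float32 values
    raw = base64.b64decode(b64_data)
    return list(struct.unpack(f"<{len(raw) // 4}f", raw))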


@@ -17,9 +17,6 @@ logger = logging.getLogger(__name__)


 def skip_if_provider_doesnt_support_openai_vector_stores(client_with_models):
-    if isinstance(client_with_models, LlamaStackAsLibraryClient):
-        pytest.skip("OpenAI vector stores are not supported when testing with library client yet.")
-
     vector_io_providers = [p for p in client_with_models.providers.list() if p.api == "vector_io"]
     for p in vector_io_providers:
         if p.provider_type in ["inline::faiss", "inline::sqlite-vec"]:
@@ -34,7 +31,7 @@ def openai_client(client_with_models):
     return OpenAI(base_url=base_url, api_key="fake")


-@pytest.fixture(params=["openai_client"])  # , "llama_stack_client"])
+@pytest.fixture(params=["openai_client", "llama_stack_client"])
 def compat_client(request, client_with_models):
     if request.param == "openai_client" and isinstance(client_with_models, LlamaStackAsLibraryClient):
         pytest.skip("OpenAI client tests not supported with library client")