Mirror of https://github.com/meta-llama/llama-stack.git
Fix embedding test case

This commit is contained in:
parent 58e164b8bc
commit b58d8d8c90

1 changed file with 5 additions and 3 deletions
@@ -55,7 +55,6 @@
 #
 
 import pytest
-from llama_stack_client import BadRequestError
 from llama_stack_client.types import EmbeddingsResponse
 from llama_stack_client.types.shared.interleaved_content import (
     ImageContentItem,
@@ -203,7 +202,10 @@ def test_embedding_truncation_error(
 ):
     if inference_provider_type not in SUPPORTED_PROVIDERS:
         pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
-    with pytest.raises(BadRequestError):
+    # Using LlamaStackClient from llama_stack_client will raise llama_stack_client.BadRequestError
+    # While using LlamaStackAsLibraryClient from llama_stack.distribution.library_client will raise the error that the backend raises
+    # Here we are using the LlamaStackAsLibraryClient, so the error raised is the same as what the backend raises
+    with pytest.raises(ValueError):
         llama_stack_client.inference.embeddings(
             model_id=embedding_model_id,
             contents=[DUMMY_LONG_TEXT],
@@ -283,7 +285,7 @@ def test_embedding_text_truncation_error(
 ):
     if inference_provider_type not in SUPPORTED_PROVIDERS:
         pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
-    with pytest.raises(BadRequestError):
+    with pytest.raises(ValueError):
         llama_stack_client.inference.embeddings(
             model_id=embedding_model_id,
             contents=[DUMMY_STRING],
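For readers adapting similar tests, here is a minimal sketch (not part of this commit) of how a truncation test could tolerate both client behaviors described in the comments above. The fixtures llama_stack_client and embedding_model_id mirror the ones in the diff; DUMMY_LONG_TEXT is a hypothetical stand-in for the oversized input defined in the real test module.

import pytest
from llama_stack_client import BadRequestError

# Hypothetical stand-in for the oversized input defined in the real test module.
DUMMY_LONG_TEXT = "This is a long dummy text. " * 1000


def test_embedding_truncation_error(llama_stack_client, embedding_model_id):
    # LlamaStackClient (HTTP) wraps the failure in BadRequestError, while
    # LlamaStackAsLibraryClient re-raises the backend's ValueError directly.
    # Accepting either exception keeps one test body valid for both clients.
    with pytest.raises((BadRequestError, ValueError)):
        llama_stack_client.inference.embeddings(
            model_id=embedding_model_id,
            contents=[DUMMY_LONG_TEXT],
        )

Pinning the expectation to ValueError, as the commit does, is stricter, but it assumes the test always runs through LlamaStackAsLibraryClient.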