fix test

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

updating structure of default

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix model id creation

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

parent b3addc94d1
commit 7ffd20d112

10 changed files with 119 additions and 62 deletions
@@ -8,9 +8,8 @@ import time
 from io import BytesIO
 
 import pytest
-from llama_stack_client import BadRequestError, NotFoundError
+from llama_stack_client import BadRequestError
 from openai import BadRequestError as OpenAIBadRequestError
-from openai import NotFoundError as OpenAINotFoundError
 
 from llama_stack.apis.vector_io import Chunk
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -839,7 +838,7 @@ def test_openai_vector_store_list_files_invalid_vector_store(
     if isinstance(compat_client, LlamaStackAsLibraryClient):
         errors = ValueError
     else:
-        errors = (NotFoundError, OpenAINotFoundError)
+        errors = (BadRequestError, OpenAIBadRequestError)
 
     with pytest.raises(errors):
         compat_client.vector_stores.files.list(vector_store_id="abc123")
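
For context, these tests tolerate either client implementation: the in-process LlamaStackAsLibraryClient surfaces a plain ValueError, while the HTTP clients map an error response onto their SDK-specific error types. A minimal sketch of that pattern, using only names visible in this diff (the expected_errors helper is hypothetical, and compat_client is assumed to be one of the two client flavors):

    import pytest
    from llama_stack_client import BadRequestError
    from openai import BadRequestError as OpenAIBadRequestError

    from llama_stack.core.library_client import LlamaStackAsLibraryClient

    def expected_errors(compat_client):
        # In-process client: errors propagate as plain ValueError.
        if isinstance(compat_client, LlamaStackAsLibraryClient):
            return ValueError
        # HTTP clients: a 400 response becomes the SDK's BadRequestError.
        return (BadRequestError, OpenAIBadRequestError)

    # Usage inside a test body:
    #     with pytest.raises(expected_errors(compat_client)):
    #         compat_client.vector_stores.files.list(vector_store_id="abc123")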
@@ -1528,11 +1527,11 @@ def test_openai_vector_store_file_batch_error_handling(
             batch_id="non_existent_batch_id",
         )
 
-    # Test operations on non-existent vector store (returns NotFoundError)
+    # Test operations on non-existent vector store (returns BadRequestError)
     if isinstance(compat_client, LlamaStackAsLibraryClient):
         vector_store_errors = ValueError
     else:
-        vector_store_errors = (NotFoundError, OpenAINotFoundError)
+        vector_store_errors = (BadRequestError, OpenAIBadRequestError)
 
     with pytest.raises(vector_store_errors):  # Should raise an error for non-existent vector store
         compat_client.vector_stores.file_batches.create(
@@ -11,7 +11,7 @@ from unittest.mock import AsyncMock
 import pytest
 
 from llama_stack.apis.models import Model, ModelType
-from llama_stack.core.datatypes import StackRunConfig, VectorStoresConfig
+from llama_stack.core.datatypes import DefaultEmbeddingModel, StackRunConfig, VectorStoresConfig
 from llama_stack.core.stack import validate_vector_stores_config
 from llama_stack.providers.datatypes import Api
 
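
The shape change behind this new import: the default embedding model is now a structured object rather than a bare embedding_model_id string. A minimal construction, assuming only the fields exercised by the tests below (the field semantics are a presumed reading of the diff: default_provider_id names the vector-io provider, while DefaultEmbeddingModel carries the embedding model's own provider and model ids):

    from llama_stack.core.datatypes import DefaultEmbeddingModel, VectorStoresConfig

    vector_stores = VectorStoresConfig(
        default_provider_id="faiss",  # presumably the default vector-io provider
        default_embedding_model=DefaultEmbeddingModel(
            provider_id="p",    # presumably the provider serving the embedding model
            model_id="valid",   # model id local to that provider
        ),
    )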
@@ -20,7 +20,15 @@ class TestVectorStoresValidation:
     async def test_validate_missing_model(self):
         """Test validation fails when model not found."""
         run_config = StackRunConfig(
-            image_name="test", providers={}, vector_stores=VectorStoresConfig(embedding_model_id="missing")
+            image_name="test",
+            providers={},
+            vector_stores=VectorStoresConfig(
+                default_provider_id="faiss",
+                default_embedding_model=DefaultEmbeddingModel(
+                    provider_id="p",
+                    model_id="missing",
+                ),
+            ),
         )
         mock_models = AsyncMock()
         mock_models.list_models.return_value = []
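
The mock setup relies on unittest.mock.AsyncMock, whose attributes are themselves awaitable mocks, so the validation code can await list_models() as if it were the real models API. A minimal sketch of the two cases these tests set up:

    from unittest.mock import AsyncMock

    mock_models = AsyncMock()
    mock_models.list_models.return_value = []  # missing-model case: nothing registered
    # ...or, for the success case, a list containing a matching embedding Model.

    # The code under test can then call the mock exactly like the real API:
    #     models = await models_api.list_models()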
|
@ -31,12 +39,20 @@ class TestVectorStoresValidation:
|
|||
async def test_validate_success(self):
|
||||
"""Test validation passes with valid model."""
|
||||
run_config = StackRunConfig(
|
||||
image_name="test", providers={}, vector_stores=VectorStoresConfig(embedding_model_id="valid")
|
||||
image_name="test",
|
||||
providers={},
|
||||
vector_stores=VectorStoresConfig(
|
||||
default_provider_id="faiss",
|
||||
default_embedding_model=DefaultEmbeddingModel(
|
||||
provider_id="p",
|
||||
model_id="valid",
|
||||
),
|
||||
),
|
||||
)
|
||||
mock_models = AsyncMock()
|
||||
mock_models.list_models.return_value = [
|
||||
Model(
|
||||
identifier="valid",
|
||||
identifier="p/valid", # Must match provider_id/model_id format
|
||||
model_type=ModelType.embedding,
|
||||
metadata={"embedding_dimension": 768},
|
||||
provider_id="p",
|
||||
|
|
|
|||
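
The one-line identifier change is the substance of the "fix model id creation" part of the commit: a registered model's identifier is the provider-scoped provider_id/model_id string, and validation matches the configured default embedding model against that. A hedged sketch of the convention (the helper is hypothetical; the real matching lives in validate_vector_stores_config):

    def qualified_model_id(provider_id: str, model_id: str) -> str:
        # Hypothetical helper for illustration: registered identifiers are
        # provider-scoped, e.g. "p/valid" for provider "p" and model "valid".
        return f"{provider_id}/{model_id}"

    assert qualified_model_id("p", "valid") == "p/valid"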