Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-03 19:57:35 +00:00

commit b3b31bf357 (parent 51a3e9930e)

    fix int tests

3 changed files with 37 additions and 37 deletions
@@ -11,6 +11,8 @@
 import uuid
 from typing import Annotated, Any, Literal, Protocol, runtime_checkable
 
+from pydantic import BaseModel, Field
+
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.version import LLAMA_STACK_API_V1
@@ -19,8 +21,6 @@ from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
 from llama_stack.schema_utils import json_schema_type, webmethod
 from llama_stack.strong_typing.schema import register_schema
 
-from pydantic import BaseModel, Field
-
 
 @json_schema_type
 class ChunkMetadata(BaseModel):
@@ -350,12 +350,7 @@ class VectorStoreFileLastError(BaseModel):
     message: str
 
 
-VectorStoreFileStatus = (
-    Literal["completed"]
-    | Literal["in_progress"]
-    | Literal["cancelled"]
-    | Literal["failed"]
-)
+VectorStoreFileStatus = Literal["completed"] | Literal["in_progress"] | Literal["cancelled"] | Literal["failed"]
 register_schema(VectorStoreFileStatus, name="VectorStoreFileStatus")
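Aside: the collapsed one-liner accepts exactly the same values as the multi-line union it replaces, and a union of single-value Literals is also equivalent, for type checkers, to one multi-value Literal. A minimal sketch (the EquivalentStatus name is illustrative only, not part of this commit):

    from typing import Literal

    VectorStoreFileStatus = Literal["completed"] | Literal["in_progress"] | Literal["cancelled"] | Literal["failed"]
    EquivalentStatus = Literal["completed", "in_progress", "cancelled", "failed"]  # same set of accepted values

    status: VectorStoreFileStatus = "completed"  # accepted; "pending" would be rejected by a type checker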
@@ -561,9 +556,7 @@ class VectorIO(Protocol):
         """
         ...
 
-    @webmethod(
-        route="/vector_stores/{vector_store_id}", method="GET", level=LLAMA_STACK_API_V1
-    )
+    @webmethod(route="/vector_stores/{vector_store_id}", method="GET", level=LLAMA_STACK_API_V1)
     async def openai_retrieve_vector_store(
         self,
         vector_store_id: str,
@@ -616,7 +616,6 @@ class OpenAIVectorStoreMixin(ABC):
                 chunk_overlap_tokens,
                 attributes,
             )
-
             if not chunks:
                 vector_store_file_object.status = "failed"
                 vector_store_file_object.last_error = VectorStoreFileLastError(
@@ -877,7 +876,6 @@ class OpenAIVectorStoreMixin(ABC):
         batch_info = self.openai_file_batches[batch_id]
         batch_object = batch_info["batch_object"]
         vector_store_id = batch_object.vector_store_id
-
         for file_id in file_ids:
             try:
                 # Process each file
@@ -918,7 +918,6 @@ def test_openai_vector_store_search_modes(llama_stack_client, client_with_models):
     assert search_response is not None
 
 
-@pytest.mark.skip(reason="Client SDK needs updating")
 def test_openai_vector_store_file_batch_create_and_retrieve(compat_client_with_empty_stores, client_with_models):
     """Test creating and retrieving a vector store file batch."""
     skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
@@ -946,31 +945,32 @@ def test_openai_vector_store_file_batch_create_and_retrieve(compat_client_with_empty_stores, client_with_models):
     assert batch.object == "vector_store.file_batch"
     assert batch.vector_store_id == vector_store.id
     assert batch.status in ["in_progress", "completed"]
-    assert set(batch.file_counts.keys()) >= {
-        "completed",
-        "failed",
-        "in_progress",
-        "cancelled",
-        "total",
-    }
-    assert batch.file_counts["total"] == len(file_ids)
+    assert batch.file_counts.total == len(file_ids)
     assert hasattr(batch, "id")
     assert hasattr(batch, "created_at")
 
-    # Retrieve the batch
-    retrieved_batch = compat_client.vector_stores.file_batches.retrieve(
-        vector_store_id=vector_store.id,
-        batch_id=batch.id,
-    )
+    # Wait for batch processing to complete
+    max_retries = 30  # 30 seconds max wait
+    retries = 0
+    retrieved_batch = None
+    while retries < max_retries:
+        retrieved_batch = compat_client.vector_stores.file_batches.retrieve(
+            vector_store_id=vector_store.id,
+            batch_id=batch.id,
+        )
+        if retrieved_batch.status in ["completed", "failed"]:
+            break
+        time.sleep(1)
+        retries += 1
 
     assert retrieved_batch is not None
     assert retrieved_batch.id == batch.id
     assert retrieved_batch.vector_store_id == vector_store.id
     assert retrieved_batch.object == "vector_store.file_batch"
-    assert retrieved_batch.file_counts["total"] == len(file_ids)
+    assert retrieved_batch.file_counts.total == len(file_ids)
+    assert retrieved_batch.status == "completed"  # Should be completed after processing
 
 
-@pytest.mark.skip(reason="Client SDK needs updating")
 def test_openai_vector_store_file_batch_list_files(compat_client_with_empty_stores, client_with_models):
     """Test listing files in a vector store file batch."""
     skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
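Aside: the same 30-second polling loop is inlined in two tests in this commit, and the call to time.sleep implies an import time at the top of the test module (not visible in these hunks). A minimal sketch of how the wait could be factored into a shared helper; the helper name and signature are hypothetical, not part of this commit:

    import time


    def wait_for_file_batch(client, vector_store_id: str, batch_id: str, timeout_seconds: int = 30):
        # Hypothetical helper: poll a file batch once per second until it reaches
        # a terminal status ("completed" or "failed") or the timeout elapses.
        retrieved_batch = None
        for _ in range(timeout_seconds):
            retrieved_batch = client.vector_stores.file_batches.retrieve(
                vector_store_id=vector_store_id,
                batch_id=batch_id,
            )
            if retrieved_batch.status in ["completed", "failed"]:
                break
            time.sleep(1)
        return retrieved_batch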
@@ -994,6 +994,19 @@ def test_openai_vector_store_file_batch_list_files(compat_client_with_empty_stores, client_with_models):
         file_ids=file_ids,
     )
 
+    # Wait for batch processing to complete
+    max_retries = 30  # 30 seconds max wait
+    retries = 0
+    while retries < max_retries:
+        retrieved_batch = compat_client.vector_stores.file_batches.retrieve(
+            vector_store_id=vector_store.id,
+            batch_id=batch.id,
+        )
+        if retrieved_batch.status in ["completed", "failed"]:
+            break
+        time.sleep(1)
+        retries += 1
+
     # List all files in the batch
     files_response = compat_client.vector_stores.file_batches.list_files(
         vector_store_id=vector_store.id,
@@ -1041,7 +1054,6 @@ def test_openai_vector_store_file_batch_list_files(compat_client_with_empty_stores, client_with_models):
     assert first_page_ids.isdisjoint(second_page_ids)
 
 
-@pytest.mark.skip(reason="Client SDK needs updating")
 def test_openai_vector_store_file_batch_cancel(compat_client_with_empty_stores, client_with_models):
     """Test cancelling a vector store file batch."""
     skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
@@ -1064,7 +1076,6 @@ def test_openai_vector_store_file_batch_cancel(compat_client_with_empty_stores, client_with_models):
         vector_store_id=vector_store.id,
         file_ids=file_ids,
     )
-
     # Try to cancel the batch (may fail if already completed)
     try:
         cancelled_batch = compat_client.vector_stores.file_batches.cancel(
@@ -1085,7 +1096,6 @@ def test_openai_vector_store_file_batch_cancel(compat_client_with_empty_stores, client_with_models):
         raise
 
 
-@pytest.mark.skip(reason="Client SDK needs updating")
 def test_openai_vector_store_file_batch_error_handling(compat_client_with_empty_stores, client_with_models):
     """Test error handling for file batch operations."""
     skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
@@ -1104,19 +1114,18 @@ def test_openai_vector_store_file_batch_error_handling(compat_client_with_empty_stores, client_with_models):
     )
-
     assert batch is not None
-    assert batch.file_counts["total"] == len(file_ids)
+    assert batch.file_counts.total == len(file_ids)
     # Invalid files should be marked as failed
-    assert batch.file_counts["failed"] >= 0  # Implementation may vary
+    assert batch.file_counts.failed >= 0  # Implementation may vary
 
     # Test retrieving non-existent batch
-    with pytest.raises(ValueError):  # Should raise an error for non-existent batch
+    with pytest.raises((BadRequestError, OpenAIBadRequestError)):  # Should raise an error for non-existent batch
         compat_client.vector_stores.file_batches.retrieve(
             vector_store_id=vector_store.id,
             batch_id="non_existent_batch_id",
         )
 
     # Test operations on non-existent vector store
-    with pytest.raises(ValueError):  # Should raise an error for non-existent vector store
+    with pytest.raises((BadRequestError, OpenAIBadRequestError)):  # Should raise an error for non-existent vector store
         compat_client.vector_stores.file_batches.create(
             vector_store_id="non_existent_vector_store",
             file_ids=["any_file_id"],
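Aside: the (BadRequestError, OpenAIBadRequestError) tuple implies the test module now imports both error types, presumably the llama-stack client's and the OpenAI SDK's; a plausible import block (assumed, since the exact imports are not shown in these hunks):

    # Assumed imports, not visible in the hunks above.
    from llama_stack_client import BadRequestError
    from openai import BadRequestError as OpenAIBadRequestError

Catching both lets the same assertion pass whether the test runs against the native llama-stack client or the OpenAI-compatible client provided by the compat_client fixture.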