This commit is contained in:
Swapna Lekkala 2025-09-30 10:31:56 -07:00
parent 852c058806
commit ca02ce0a98
3 changed files with 41 additions and 205 deletions

View file

@@ -11,16 +11,16 @@
import uuid
from typing import Annotated, Any, Literal, Protocol, runtime_checkable
from pydantic import BaseModel, Field
from llama_stack.apis.inference import InterleavedContent
from llama_stack.apis.vector_dbs import VectorDB
from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
from llama_stack.schema_utils import json_schema_type, webmethod
from llama_stack.strong_typing.schema import register_schema
from pydantic import BaseModel, Field
@json_schema_type
class ChunkMetadata(BaseModel):
@@ -350,12 +350,7 @@ class VectorStoreFileLastError(BaseModel):
message: str
VectorStoreFileStatus = (
Literal["completed"]
| Literal["in_progress"]
| Literal["cancelled"]
| Literal["failed"]
)
VectorStoreFileStatus = Literal["completed"] | Literal["in_progress"] | Literal["cancelled"] | Literal["failed"]
register_schema(VectorStoreFileStatus, name="VectorStoreFileStatus")
@@ -561,9 +556,7 @@ class VectorIO(Protocol):
"""
...
@webmethod(
route="/vector_stores/{vector_store_id}", method="GET", level=LLAMA_STACK_API_V1
)
@webmethod(route="/vector_stores/{vector_store_id}", method="GET", level=LLAMA_STACK_API_V1)
async def openai_retrieve_vector_store(
self,
vector_store_id: str,
@@ -768,13 +761,9 @@ class VectorIO(Protocol):
...
@webmethod(
route="/vector_stores/{vector_store_id}/file_batches",
method="POST",
level=LLAMA_STACK_API_V1,
)
async def openai_create_vector_store_file_batch(
self,
@@ -794,15 +783,9 @@ class VectorIO(Protocol):
...
@webmethod(
route="/vector_stores/{vector_store_id}/file_batches/{batch_id}",
method="GET",
level=LLAMA_STACK_API_V1,
)
async def openai_retrieve_vector_store_file_batch(
self,
@@ -818,15 +801,9 @@ class VectorIO(Protocol):
...
@webmethod(
route="/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
method="GET",
level=LLAMA_STACK_API_V1,
)
async def openai_list_files_in_vector_store_file_batch(
self,
@@ -852,15 +829,9 @@ class VectorIO(Protocol):
...
@webmethod(
route="/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
method="POST",
level=LLAMA_STACK_API_V1,
)
async def openai_cancel_vector_store_file_batch(
self,