From 941f505eb08f7ba1ec325cb831016693c790993e Mon Sep 17 00:00:00 2001 From: Ben Browning Date: Fri, 13 Jun 2025 14:32:48 -0400 Subject: [PATCH] feat: File search tool for Responses API (#2426) # What does this PR do? This is an initial working prototype of wiring up the `file_search` builtin tool for the Responses API to our existing rag knowledge search tool. This is me seeing what I could pull together on top of the bits we already have merged. This may not be the ideal way to implement this, and things like how I shuffle the vector store ids from the original response API tool request to the actual tool execution feel a bit hacky (grep for `tool_kwargs["vector_db_ids"]` in `_execute_tool_call` to see what I mean). ## Test Plan I stubbed in some new tests to exercise this using text and pdf documents. Note that this is currently under tests/verification only because it sometimes flakes with tool calling of the small Llama-3.2-3B model we run in CI (and that I use as an example below). We'd want to make the test a bit more robust in some way if we moved this over to tests/integration and ran it in CI. ### OpenAI SaaS (to verify test correctness) ``` pytest -sv tests/verifications/openai_api/test_responses.py \ -k 'file_search' \ --base-url=https://api.openai.com/v1 \ --model=gpt-4o ``` ### Fireworks with faiss vector store ``` llama stack run llama_stack/templates/fireworks/run.yaml pytest -sv tests/verifications/openai_api/test_responses.py \ -k 'file_search' \ --base-url=http://localhost:8321/v1/openai/v1 \ --model=meta-llama/Llama-3.3-70B-Instruct ``` ### Ollama with faiss vector store This sometimes flakes on Ollama because the quantized small model doesn't always choose to call the tool to answer the user's question. But, it often works. ``` ollama run llama3.2:3b INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct" \ llama stack run ./llama_stack/templates/ollama/run.yaml \ --image-type venv \ --env OLLAMA_URL="http://0.0.0.0:11434" pytest -sv tests/verifications/openai_api/test_responses.py \ -k'file_search' \ --base-url=http://localhost:8321/v1/openai/v1 \ --model=meta-llama/Llama-3.2-3B-Instruct ``` ### OpenAI provider with sqlite-vec vector store ``` llama stack run ./llama_stack/templates/starter/run.yaml --image-type venv pytest -sv tests/verifications/openai_api/test_responses.py \ -k 'file_search' \ --base-url=http://localhost:8321/v1/openai/v1 \ --model=openai/gpt-4o-mini ``` ### Ensure existing vector store integration tests still pass ``` ollama run llama3.2:3b INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct" \ llama stack run ./llama_stack/templates/ollama/run.yaml \ --image-type venv \ --env OLLAMA_URL="http://0.0.0.0:11434" LLAMA_STACK_CONFIG=http://localhost:8321 \ pytest -sv tests/integration/vector_io \ --text-model "meta-llama/Llama-3.2-3B-Instruct" \ --embedding-model=all-MiniLM-L6-v2 ``` --------- Signed-off-by: Ben Browning --- docs/_static/llama-stack-spec.html | 377 +++++++++++++++++- docs/_static/llama-stack-spec.yaml | 234 ++++++++++- .../self_hosted_distro/ollama.md | 1 + llama_stack/apis/agents/openai_responses.py | 16 +- llama_stack/apis/vector_io/vector_io.py | 65 ++- llama_stack/distribution/routers/vector_io.py | 18 + .../agents/meta_reference/openai_responses.py | 58 ++- .../inline/tool_runtime/rag/memory.py | 2 + .../inline/vector_io/faiss/__init__.py | 2 +- .../providers/inline/vector_io/faiss/faiss.py | 4 +- .../inline/vector_io/sqlite_vec/__init__.py | 2 +- .../inline/vector_io/sqlite_vec/sqlite_vec.py | 4 +- 
llama_stack/providers/registry/vector_io.py | 4 + .../remote/vector_io/chroma/chroma.py | 10 + .../remote/vector_io/milvus/milvus.py | 10 + .../remote/vector_io/qdrant/qdrant.py | 10 + .../utils/memory/openai_vector_store_mixin.py | 97 +++++ .../providers/utils/memory/vector_store.py | 16 +- llama_stack/templates/ollama/build.yaml | 2 + llama_stack/templates/ollama/ollama.py | 9 + .../templates/ollama/run-with-safety.yaml | 9 + llama_stack/templates/ollama/run.yaml | 9 + llama_stack/templates/starter/build.yaml | 2 + llama_stack/templates/starter/run.yaml | 9 + llama_stack/templates/starter/starter.py | 8 + .../fixtures/pdfs/llama_stack_and_models.pdf | Bin 0 -> 37844 bytes .../fixtures/test_cases/responses.yaml | 19 + .../openai_api/test_responses.py | 132 ++++++ 28 files changed, 1105 insertions(+), 24 deletions(-) create mode 100644 tests/verifications/openai_api/fixtures/pdfs/llama_stack_and_models.pdf diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html index 96de04ec9..ce47f8ebb 100644 --- a/docs/_static/llama-stack-spec.html +++ b/docs/_static/llama-stack-spec.html @@ -3240,6 +3240,59 @@ } } }, + "/v1/openai/v1/vector_stores/{vector_store_id}/files": { + "post": { + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the attached file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "description": "Attach a file to a vector store.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "description": "The ID of the vector store to attach the file to.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiAttachFileToVectorStoreRequest" + } + } + }, + "required": true + } + } + }, "/v1/openai/v1/completions": { "post": { "responses": { @@ -7047,6 +7100,9 @@ { "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, { "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" }, @@ -7193,12 +7249,41 @@ "const": "file_search", "default": "file_search" }, - "vector_store_id": { + "vector_store_ids": { "type": "array", "items": { "type": "string" } }, + "filters": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "max_num_results": { + "type": "integer", + "default": 10 + }, "ranking_options": { "type": "object", "properties": { @@ -7217,7 +7302,7 @@ "additionalProperties": false, "required": [ "type", - "vector_store_id" + "vector_store_ids" ], "title": "OpenAIResponseInputToolFileSearch" }, @@ -7484,6 +7569,64 @@ ], "title": "OpenAIResponseOutputMessageContentOutputText" }, + "OpenAIResponseOutputMessageFileSearchToolCall": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "queries": { + "type": "array", + "items": { + "type": "string" + } + }, + "status": { + "type": 
"string" + }, + "type": { + "type": "string", + "const": "file_search_call", + "default": "file_search_call" + }, + "results": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + } + }, + "additionalProperties": false, + "required": [ + "id", + "queries", + "status", + "type" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCall" + }, "OpenAIResponseOutputMessageFunctionToolCall": { "type": "object", "properties": { @@ -7760,6 +7903,9 @@ { "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, { "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" }, @@ -7775,6 +7921,7 @@ "mapping": { "message": "#/components/schemas/OpenAIResponseMessage", "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall", + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" @@ -11766,6 +11913,232 @@ ], "title": "LogEventRequest" }, + "VectorStoreChunkingStrategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + }, + "VectorStoreChunkingStrategyAuto": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "auto", + "default": "auto" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "VectorStoreChunkingStrategyAuto" + }, + "VectorStoreChunkingStrategyStatic": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "static", + "default": "static" + }, + "static": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStaticConfig" + } + }, + "additionalProperties": false, + "required": [ + "type", + "static" + ], + "title": "VectorStoreChunkingStrategyStatic" + }, + "VectorStoreChunkingStrategyStaticConfig": { + "type": "object", + "properties": { + "chunk_overlap_tokens": { + "type": "integer", + "default": 400 + }, + "max_chunk_size_tokens": { + "type": "integer", + "default": 800 + } + }, + "additionalProperties": false, + "required": [ + "chunk_overlap_tokens", + "max_chunk_size_tokens" + ], + "title": "VectorStoreChunkingStrategyStaticConfig" + }, + "OpenaiAttachFileToVectorStoreRequest": { + "type": "object", + "properties": { + "file_id": { + "type": "string", + "description": "The ID of the file to attach to the vector store." + }, + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "The key-value attributes stored with the file, which can be used for filtering." 
+ }, + "chunking_strategy": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategy", + "description": "The chunking strategy to use for the file." + } + }, + "additionalProperties": false, + "required": [ + "file_id" + ], + "title": "OpenaiAttachFileToVectorStoreRequest" + }, + "VectorStoreFileLastError": { + "type": "object", + "properties": { + "code": { + "oneOf": [ + { + "type": "string", + "const": "server_error" + }, + { + "type": "string", + "const": "rate_limit_exceeded" + } + ] + }, + "message": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "code", + "message" + ], + "title": "VectorStoreFileLastError" + }, + "VectorStoreFileObject": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "object": { + "type": "string", + "default": "vector_store.file" + }, + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "chunking_strategy": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategy" + }, + "created_at": { + "type": "integer" + }, + "last_error": { + "$ref": "#/components/schemas/VectorStoreFileLastError" + }, + "status": { + "oneOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ] + }, + "usage_bytes": { + "type": "integer", + "default": 0 + }, + "vector_store_id": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "id", + "object", + "attributes", + "chunking_strategy", + "created_at", + "status", + "usage_bytes", + "vector_store_id" + ], + "title": "VectorStoreFileObject", + "description": "OpenAI Vector Store File object." + }, "OpenAIJSONSchema": { "type": "object", "properties": { diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml index b2fe870be..07a176b32 100644 --- a/docs/_static/llama-stack-spec.yaml +++ b/docs/_static/llama-stack-spec.yaml @@ -2263,6 +2263,43 @@ paths: schema: $ref: '#/components/schemas/LogEventRequest' required: true + /v1/openai/v1/vector_stores/{vector_store_id}/files: + post: + responses: + '200': + description: >- + A VectorStoreFileObject representing the attached file. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + description: Attach a file to a vector store. + parameters: + - name: vector_store_id + in: path + description: >- + The ID of the vector store to attach the file to. 
+ required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/OpenaiAttachFileToVectorStoreRequest' + required: true /v1/openai/v1/completions: post: responses: @@ -5021,6 +5058,7 @@ components: OpenAIResponseInput: oneOf: - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput' - $ref: '#/components/schemas/OpenAIResponseMessage' @@ -5115,10 +5153,23 @@ components: type: string const: file_search default: file_search - vector_store_id: + vector_store_ids: type: array items: type: string + filters: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + max_num_results: + type: integer + default: 10 ranking_options: type: object properties: @@ -5132,7 +5183,7 @@ components: additionalProperties: false required: - type - - vector_store_id + - vector_store_ids title: OpenAIResponseInputToolFileSearch OpenAIResponseInputToolFunction: type: object @@ -5294,6 +5345,41 @@ components: - type title: >- OpenAIResponseOutputMessageContentOutputText + "OpenAIResponseOutputMessageFileSearchToolCall": + type: object + properties: + id: + type: string + queries: + type: array + items: + type: string + status: + type: string + type: + type: string + const: file_search_call + default: file_search_call + results: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - id + - queries + - status + - type + title: >- + OpenAIResponseOutputMessageFileSearchToolCall "OpenAIResponseOutputMessageFunctionToolCall": type: object properties: @@ -5491,6 +5577,7 @@ components: oneOf: - $ref: '#/components/schemas/OpenAIResponseMessage' - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' @@ -5499,6 +5586,7 @@ components: mapping: message: '#/components/schemas/OpenAIResponseMessage' web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' @@ -8251,6 +8339,148 @@ components: - event - ttl_seconds title: LogEventRequest + VectorStoreChunkingStrategy: + oneOf: + - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' + - $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic' + discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/VectorStoreChunkingStrategyAuto' + static: '#/components/schemas/VectorStoreChunkingStrategyStatic' + VectorStoreChunkingStrategyAuto: + type: object + properties: + type: + type: string + const: auto + default: auto + additionalProperties: false + 
required: + - type + title: VectorStoreChunkingStrategyAuto + VectorStoreChunkingStrategyStatic: + type: object + properties: + type: + type: string + const: static + default: static + static: + $ref: '#/components/schemas/VectorStoreChunkingStrategyStaticConfig' + additionalProperties: false + required: + - type + - static + title: VectorStoreChunkingStrategyStatic + VectorStoreChunkingStrategyStaticConfig: + type: object + properties: + chunk_overlap_tokens: + type: integer + default: 400 + max_chunk_size_tokens: + type: integer + default: 800 + additionalProperties: false + required: + - chunk_overlap_tokens + - max_chunk_size_tokens + title: VectorStoreChunkingStrategyStaticConfig + OpenaiAttachFileToVectorStoreRequest: + type: object + properties: + file_id: + type: string + description: >- + The ID of the file to attach to the vector store. + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The key-value attributes stored with the file, which can be used for filtering. + chunking_strategy: + $ref: '#/components/schemas/VectorStoreChunkingStrategy' + description: >- + The chunking strategy to use for the file. + additionalProperties: false + required: + - file_id + title: OpenaiAttachFileToVectorStoreRequest + VectorStoreFileLastError: + type: object + properties: + code: + oneOf: + - type: string + const: server_error + - type: string + const: rate_limit_exceeded + message: + type: string + additionalProperties: false + required: + - code + - message + title: VectorStoreFileLastError + VectorStoreFileObject: + type: object + properties: + id: + type: string + object: + type: string + default: vector_store.file + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + chunking_strategy: + $ref: '#/components/schemas/VectorStoreChunkingStrategy' + created_at: + type: integer + last_error: + $ref: '#/components/schemas/VectorStoreFileLastError' + status: + oneOf: + - type: string + const: completed + - type: string + const: in_progress + - type: string + const: cancelled + - type: string + const: failed + usage_bytes: + type: integer + default: 0 + vector_store_id: + type: string + additionalProperties: false + required: + - id + - object + - attributes + - chunking_strategy + - created_at + - status + - usage_bytes + - vector_store_id + title: VectorStoreFileObject + description: OpenAI Vector Store File object. 
OpenAIJSONSchema: type: object properties: diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md index 4d148feda..e09c79359 100644 --- a/docs/source/distributions/self_hosted_distro/ollama.md +++ b/docs/source/distributions/self_hosted_distro/ollama.md @@ -18,6 +18,7 @@ The `llamastack/distribution-ollama` distribution consists of the following prov | agents | `inline::meta-reference` | | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | +| files | `inline::localfs` | | inference | `remote::ollama` | | post_training | `inline::huggingface` | | safety | `inline::llama-guard` | diff --git a/llama_stack/apis/agents/openai_responses.py b/llama_stack/apis/agents/openai_responses.py index 35b3d5ace..2e1cb257a 100644 --- a/llama_stack/apis/agents/openai_responses.py +++ b/llama_stack/apis/agents/openai_responses.py @@ -81,6 +81,15 @@ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel): type: Literal["web_search_call"] = "web_search_call" +@json_schema_type +class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel): + id: str + queries: list[str] + status: str + type: Literal["file_search_call"] = "file_search_call" + results: list[dict[str, Any]] | None = None + + @json_schema_type class OpenAIResponseOutputMessageFunctionToolCall(BaseModel): call_id: str @@ -119,6 +128,7 @@ class OpenAIResponseOutputMessageMCPListTools(BaseModel): OpenAIResponseOutput = Annotated[ OpenAIResponseMessage | OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseOutputMessageFileSearchToolCall | OpenAIResponseOutputMessageFunctionToolCall | OpenAIResponseOutputMessageMCPCall | OpenAIResponseOutputMessageMCPListTools, @@ -362,6 +372,7 @@ class OpenAIResponseInputFunctionToolCallOutput(BaseModel): OpenAIResponseInput = Annotated[ # Responses API allows output messages to be passed in as input OpenAIResponseOutputMessageWebSearchToolCall + | OpenAIResponseOutputMessageFileSearchToolCall | OpenAIResponseOutputMessageFunctionToolCall | OpenAIResponseInputFunctionToolCallOutput | @@ -397,9 +408,10 @@ class FileSearchRankingOptions(BaseModel): @json_schema_type class OpenAIResponseInputToolFileSearch(BaseModel): type: Literal["file_search"] = "file_search" - vector_store_id: list[str] + vector_store_ids: list[str] + filters: dict[str, Any] | None = None + max_num_results: int | None = Field(default=10, ge=1, le=50) ranking_options: FileSearchRankingOptions | None = None - # TODO: add filters class ApprovalFilter(BaseModel): diff --git a/llama_stack/apis/vector_io/vector_io.py b/llama_stack/apis/vector_io/vector_io.py index 1c8ae4dab..77d4cfc5a 100644 --- a/llama_stack/apis/vector_io/vector_io.py +++ b/llama_stack/apis/vector_io/vector_io.py @@ -8,7 +8,7 @@ # # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
-from typing import Any, Literal, Protocol, runtime_checkable +from typing import Annotated, Any, Literal, Protocol, runtime_checkable from pydantic import BaseModel, Field @@ -16,6 +16,7 @@ from llama_stack.apis.inference import InterleavedContent from llama_stack.apis.vector_dbs import VectorDB from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol from llama_stack.schema_utils import json_schema_type, webmethod +from llama_stack.strong_typing.schema import register_schema class Chunk(BaseModel): @@ -133,6 +134,50 @@ class VectorStoreDeleteResponse(BaseModel): deleted: bool = True +@json_schema_type +class VectorStoreChunkingStrategyAuto(BaseModel): + type: Literal["auto"] = "auto" + + +@json_schema_type +class VectorStoreChunkingStrategyStaticConfig(BaseModel): + chunk_overlap_tokens: int = 400 + max_chunk_size_tokens: int = Field(800, ge=100, le=4096) + + +@json_schema_type +class VectorStoreChunkingStrategyStatic(BaseModel): + type: Literal["static"] = "static" + static: VectorStoreChunkingStrategyStaticConfig + + +VectorStoreChunkingStrategy = Annotated[ + VectorStoreChunkingStrategyAuto | VectorStoreChunkingStrategyStatic, Field(discriminator="type") +] +register_schema(VectorStoreChunkingStrategy, name="VectorStoreChunkingStrategy") + + +@json_schema_type +class VectorStoreFileLastError(BaseModel): + code: Literal["server_error"] | Literal["rate_limit_exceeded"] + message: str + + +@json_schema_type +class VectorStoreFileObject(BaseModel): + """OpenAI Vector Store File object.""" + + id: str + object: str = "vector_store.file" + attributes: dict[str, Any] = Field(default_factory=dict) + chunking_strategy: VectorStoreChunkingStrategy + created_at: int + last_error: VectorStoreFileLastError | None = None + status: Literal["completed"] | Literal["in_progress"] | Literal["cancelled"] | Literal["failed"] + usage_bytes: int = 0 + vector_store_id: str + + class VectorDBStore(Protocol): def get_vector_db(self, vector_db_id: str) -> VectorDB | None: ... @@ -290,3 +335,21 @@ class VectorIO(Protocol): :returns: A VectorStoreSearchResponse containing the search results. """ ... + + @webmethod(route="/openai/v1/vector_stores/{vector_store_id}/files", method="POST") + async def openai_attach_file_to_vector_store( + self, + vector_store_id: str, + file_id: str, + attributes: dict[str, Any] | None = None, + chunking_strategy: VectorStoreChunkingStrategy | None = None, + ) -> VectorStoreFileObject: + """Attach a file to a vector store. + + :param vector_store_id: The ID of the vector store to attach the file to. + :param file_id: The ID of the file to attach to the vector store. + :param attributes: The key-value attributes stored with the file, which can be used for filtering. + :param chunking_strategy: The chunking strategy to use for the file. + :returns: A VectorStoreFileObject representing the attached file. + """ + ... 
diff --git a/llama_stack/distribution/routers/vector_io.py b/llama_stack/distribution/routers/vector_io.py index 3d65aef24..8eb56b7ca 100644 --- a/llama_stack/distribution/routers/vector_io.py +++ b/llama_stack/distribution/routers/vector_io.py @@ -19,6 +19,7 @@ from llama_stack.apis.vector_io import ( VectorStoreObject, VectorStoreSearchResponsePage, ) +from llama_stack.apis.vector_io.vector_io import VectorStoreChunkingStrategy, VectorStoreFileObject from llama_stack.log import get_logger from llama_stack.providers.datatypes import RoutingTable @@ -254,3 +255,20 @@ class VectorIORouter(VectorIO): ranking_options=ranking_options, rewrite_query=rewrite_query, ) + + async def openai_attach_file_to_vector_store( + self, + vector_store_id: str, + file_id: str, + attributes: dict[str, Any] | None = None, + chunking_strategy: VectorStoreChunkingStrategy | None = None, + ) -> VectorStoreFileObject: + logger.debug(f"VectorIORouter.openai_attach_file_to_vector_store: {vector_store_id}, {file_id}") + # Route based on vector store ID + provider = self.routing_table.get_provider_impl(vector_store_id) + return await provider.openai_attach_file_to_vector_store( + vector_store_id=vector_store_id, + file_id=file_id, + attributes=attributes, + chunking_strategy=chunking_strategy, + ) diff --git a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py index 0ff6dc2c5..33fcbfa5d 100644 --- a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py +++ b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py @@ -24,6 +24,7 @@ from llama_stack.apis.agents.openai_responses import ( OpenAIResponseInputMessageContentImage, OpenAIResponseInputMessageContentText, OpenAIResponseInputTool, + OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolMCP, OpenAIResponseMessage, OpenAIResponseObject, @@ -34,6 +35,7 @@ from llama_stack.apis.agents.openai_responses import ( OpenAIResponseOutput, OpenAIResponseOutputMessageContent, OpenAIResponseOutputMessageContentOutputText, + OpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseOutputMessageFunctionToolCall, OpenAIResponseOutputMessageMCPListTools, OpenAIResponseOutputMessageWebSearchToolCall, @@ -62,7 +64,7 @@ from llama_stack.apis.inference.inference import ( OpenAIToolMessageParam, OpenAIUserMessageParam, ) -from llama_stack.apis.tools.tools import ToolGroups, ToolRuntime +from llama_stack.apis.tools import RAGQueryConfig, ToolGroups, ToolRuntime from llama_stack.log import get_logger from llama_stack.models.llama.datatypes import ToolDefinition, ToolParamDefinition from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool @@ -198,7 +200,8 @@ class OpenAIResponsePreviousResponseWithInputItems(BaseModel): class ChatCompletionContext(BaseModel): model: str messages: list[OpenAIMessageParam] - tools: list[ChatCompletionToolParam] | None = None + response_tools: list[OpenAIResponseInputTool] | None = None + chat_tools: list[ChatCompletionToolParam] | None = None mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] temperature: float | None response_format: OpenAIResponseFormatParam @@ -388,7 +391,8 @@ class OpenAIResponsesImpl: ctx = ChatCompletionContext( model=model, messages=messages, - tools=chat_tools, + response_tools=tools, + chat_tools=chat_tools, mcp_tool_to_server=mcp_tool_to_server, temperature=temperature, response_format=response_format, @@ -417,7 +421,7 @@ class OpenAIResponsesImpl: 
completion_result = await self.inference_api.openai_chat_completion( model=ctx.model, messages=messages, - tools=ctx.tools, + tools=ctx.chat_tools, stream=True, temperature=ctx.temperature, response_format=ctx.response_format, @@ -606,6 +610,12 @@ class OpenAIResponsesImpl: if not tool: raise ValueError(f"Tool {tool_name} not found") chat_tools.append(make_openai_tool(tool_name, tool)) + elif input_tool.type == "file_search": + tool_name = "knowledge_search" + tool = await self.tool_groups_api.get_tool(tool_name) + if not tool: + raise ValueError(f"Tool {tool_name} not found") + chat_tools.append(make_openai_tool(tool_name, tool)) elif input_tool.type == "mcp": always_allowed = None never_allowed = None @@ -667,6 +677,7 @@ class OpenAIResponsesImpl: tool_call_id = tool_call.id function = tool_call.function + tool_kwargs = json.loads(function.arguments) if function.arguments else {} if not function or not tool_call_id or not function.name: return None, None @@ -680,12 +691,26 @@ class OpenAIResponsesImpl: endpoint=mcp_tool.server_url, headers=mcp_tool.headers or {}, tool_name=function.name, - kwargs=json.loads(function.arguments) if function.arguments else {}, + kwargs=tool_kwargs, ) else: + if function.name == "knowledge_search": + response_file_search_tool = next( + t for t in ctx.response_tools if isinstance(t, OpenAIResponseInputToolFileSearch) + ) + if response_file_search_tool: + if response_file_search_tool.filters: + logger.warning("Filters are not yet supported for file_search tool") + if response_file_search_tool.ranking_options: + logger.warning("Ranking options are not yet supported for file_search tool") + tool_kwargs["vector_db_ids"] = response_file_search_tool.vector_store_ids + tool_kwargs["query_config"] = RAGQueryConfig( + mode="vector", + max_chunks=response_file_search_tool.max_num_results, + ) result = await self.tool_runtime_api.invoke_tool( tool_name=function.name, - kwargs=json.loads(function.arguments) if function.arguments else {}, + kwargs=tool_kwargs, ) except Exception as e: error_exc = e @@ -713,6 +738,27 @@ class OpenAIResponsesImpl: ) if error_exc or (result.error_code and result.error_code > 0) or result.error_message: message.status = "failed" + elif function.name == "knowledge_search": + message = OpenAIResponseOutputMessageFileSearchToolCall( + id=tool_call_id, + queries=[tool_kwargs.get("query", "")], + status="completed", + ) + if "document_ids" in result.metadata: + message.results = [] + for i, doc_id in enumerate(result.metadata["document_ids"]): + text = result.metadata["chunks"][i] if "chunks" in result.metadata else None + score = result.metadata["scores"][i] if "scores" in result.metadata else None + message.results.append( + { + "file_id": doc_id, + "filename": doc_id, + "text": text, + "score": score, + } + ) + if error_exc or (result.error_code and result.error_code > 0) or result.error_message: + message.status = "failed" else: raise ValueError(f"Unknown tool {function.name} called") diff --git a/llama_stack/providers/inline/tool_runtime/rag/memory.py b/llama_stack/providers/inline/tool_runtime/rag/memory.py index 4776d47d0..e15d067a7 100644 --- a/llama_stack/providers/inline/tool_runtime/rag/memory.py +++ b/llama_stack/providers/inline/tool_runtime/rag/memory.py @@ -170,6 +170,8 @@ class MemoryToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, RAGToolRunti content=picked, metadata={ "document_ids": [c.metadata["document_id"] for c in chunks[: len(picked)]], + "chunks": [c.content for c in chunks[: len(picked)]], + "scores": scores[: 
len(picked)], }, ) diff --git a/llama_stack/providers/inline/vector_io/faiss/__init__.py b/llama_stack/providers/inline/vector_io/faiss/__init__.py index 68a1dee66..dd1c59b7b 100644 --- a/llama_stack/providers/inline/vector_io/faiss/__init__.py +++ b/llama_stack/providers/inline/vector_io/faiss/__init__.py @@ -16,6 +16,6 @@ async def get_provider_impl(config: FaissVectorIOConfig, deps: dict[Api, Any]): assert isinstance(config, FaissVectorIOConfig), f"Unexpected config type: {type(config)}" - impl = FaissVectorIOAdapter(config, deps[Api.inference]) + impl = FaissVectorIOAdapter(config, deps[Api.inference], deps.get(Api.files, None)) await impl.initialize() return impl diff --git a/llama_stack/providers/inline/vector_io/faiss/faiss.py b/llama_stack/providers/inline/vector_io/faiss/faiss.py index 5e9155011..afb911726 100644 --- a/llama_stack/providers/inline/vector_io/faiss/faiss.py +++ b/llama_stack/providers/inline/vector_io/faiss/faiss.py @@ -15,6 +15,7 @@ import faiss import numpy as np from numpy.typing import NDArray +from llama_stack.apis.files import Files from llama_stack.apis.inference import InterleavedContent from llama_stack.apis.inference.inference import Inference from llama_stack.apis.vector_dbs import VectorDB @@ -132,9 +133,10 @@ class FaissIndex(EmbeddingIndex): class FaissVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate): - def __init__(self, config: FaissVectorIOConfig, inference_api: Inference) -> None: + def __init__(self, config: FaissVectorIOConfig, inference_api: Inference, files_api: Files | None) -> None: self.config = config self.inference_api = inference_api + self.files_api = files_api self.cache: dict[str, VectorDBWithIndex] = {} self.kvstore: KVStore | None = None self.openai_vector_stores: dict[str, dict[str, Any]] = {} diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py b/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py index 6db176eda..e5200a755 100644 --- a/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +++ b/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py @@ -15,6 +15,6 @@ async def get_provider_impl(config: SQLiteVectorIOConfig, deps: dict[Api, Any]): from .sqlite_vec import SQLiteVecVectorIOAdapter assert isinstance(config, SQLiteVectorIOConfig), f"Unexpected config type: {type(config)}" - impl = SQLiteVecVectorIOAdapter(config, deps[Api.inference]) + impl = SQLiteVecVectorIOAdapter(config, deps[Api.inference], deps.get(Api.files, None)) await impl.initialize() return impl diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py b/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py index 02f04e766..f69cf8a32 100644 --- a/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +++ b/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py @@ -17,6 +17,7 @@ import numpy as np import sqlite_vec from numpy.typing import NDArray +from llama_stack.apis.files.files import Files from llama_stack.apis.inference.inference import Inference from llama_stack.apis.vector_dbs import VectorDB from llama_stack.apis.vector_io import ( @@ -301,9 +302,10 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtoc and creates a cache of VectorDBWithIndex instances (each wrapping a SQLiteVecIndex). 
""" - def __init__(self, config, inference_api: Inference) -> None: + def __init__(self, config, inference_api: Inference, files_api: Files | None) -> None: self.config = config self.inference_api = inference_api + self.files_api = files_api self.cache: dict[str, VectorDBWithIndex] = {} self.openai_vector_stores: dict[str, dict[str, Any]] = {} diff --git a/llama_stack/providers/registry/vector_io.py b/llama_stack/providers/registry/vector_io.py index d888c8420..55c1b5617 100644 --- a/llama_stack/providers/registry/vector_io.py +++ b/llama_stack/providers/registry/vector_io.py @@ -24,6 +24,7 @@ def available_providers() -> list[ProviderSpec]: config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig", deprecation_warning="Please use the `inline::faiss` provider instead.", api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], ), InlineProviderSpec( api=Api.vector_io, @@ -32,6 +33,7 @@ def available_providers() -> list[ProviderSpec]: module="llama_stack.providers.inline.vector_io.faiss", config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig", api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], ), # NOTE: sqlite-vec cannot be bundled into the container image because it does not have a # source distribution and the wheels are not available for all platforms. @@ -42,6 +44,7 @@ def available_providers() -> list[ProviderSpec]: module="llama_stack.providers.inline.vector_io.sqlite_vec", config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig", api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], ), InlineProviderSpec( api=Api.vector_io, @@ -51,6 +54,7 @@ def available_providers() -> list[ProviderSpec]: config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig", deprecation_warning="Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead.", api_dependencies=[Api.inference], + optional_api_dependencies=[Api.files], ), remote_provider_spec( Api.vector_io, diff --git a/llama_stack/providers/remote/vector_io/chroma/chroma.py b/llama_stack/providers/remote/vector_io/chroma/chroma.py index 0d8451eb2..fee29cfd9 100644 --- a/llama_stack/providers/remote/vector_io/chroma/chroma.py +++ b/llama_stack/providers/remote/vector_io/chroma/chroma.py @@ -23,6 +23,7 @@ from llama_stack.apis.vector_io import ( VectorStoreObject, VectorStoreSearchResponsePage, ) +from llama_stack.apis.vector_io.vector_io import VectorStoreChunkingStrategy, VectorStoreFileObject from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig from llama_stack.providers.utils.memory.vector_store import ( @@ -241,3 +242,12 @@ class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate): rewrite_query: bool | None = False, ) -> VectorStoreSearchResponsePage: raise NotImplementedError("OpenAI Vector Stores API is not supported in Chroma") + + async def openai_attach_file_to_vector_store( + self, + vector_store_id: str, + file_id: str, + attributes: dict[str, Any] | None = None, + chunking_strategy: VectorStoreChunkingStrategy | None = None, + ) -> VectorStoreFileObject: + raise NotImplementedError("OpenAI Vector Stores API is not supported in Chroma") diff --git a/llama_stack/providers/remote/vector_io/milvus/milvus.py b/llama_stack/providers/remote/vector_io/milvus/milvus.py index 8ae74aedc..51c541c02 100644 --- 
a/llama_stack/providers/remote/vector_io/milvus/milvus.py +++ b/llama_stack/providers/remote/vector_io/milvus/milvus.py @@ -25,6 +25,7 @@ from llama_stack.apis.vector_io import ( VectorStoreObject, VectorStoreSearchResponsePage, ) +from llama_stack.apis.vector_io.vector_io import VectorStoreChunkingStrategy, VectorStoreFileObject from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate from llama_stack.providers.inline.vector_io.milvus import MilvusVectorIOConfig as InlineMilvusVectorIOConfig from llama_stack.providers.utils.memory.vector_store import ( @@ -240,6 +241,15 @@ class MilvusVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate): ) -> VectorStoreSearchResponsePage: raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant") + async def openai_attach_file_to_vector_store( + self, + vector_store_id: str, + file_id: str, + attributes: dict[str, Any] | None = None, + chunking_strategy: VectorStoreChunkingStrategy | None = None, + ) -> VectorStoreFileObject: + raise NotImplementedError("OpenAI Vector Stores API is not supported in Milvus") + def generate_chunk_id(document_id: str, chunk_text: str) -> str: """Generate a unique chunk ID using a hash of document ID and chunk text.""" diff --git a/llama_stack/providers/remote/vector_io/qdrant/qdrant.py b/llama_stack/providers/remote/vector_io/qdrant/qdrant.py index 10f3b5b0d..1631a7a2a 100644 --- a/llama_stack/providers/remote/vector_io/qdrant/qdrant.py +++ b/llama_stack/providers/remote/vector_io/qdrant/qdrant.py @@ -23,6 +23,7 @@ from llama_stack.apis.vector_io import ( VectorStoreObject, VectorStoreSearchResponsePage, ) +from llama_stack.apis.vector_io.vector_io import VectorStoreChunkingStrategy, VectorStoreFileObject from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig from llama_stack.providers.utils.memory.vector_store import ( @@ -241,3 +242,12 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate): rewrite_query: bool | None = False, ) -> VectorStoreSearchResponsePage: raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant") + + async def openai_attach_file_to_vector_store( + self, + vector_store_id: str, + file_id: str, + attributes: dict[str, Any] | None = None, + chunking_strategy: VectorStoreChunkingStrategy | None = None, + ) -> VectorStoreFileObject: + raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant") diff --git a/llama_stack/providers/utils/memory/openai_vector_store_mixin.py b/llama_stack/providers/utils/memory/openai_vector_store_mixin.py index 7d8163ed2..f9701897a 100644 --- a/llama_stack/providers/utils/memory/openai_vector_store_mixin.py +++ b/llama_stack/providers/utils/memory/openai_vector_store_mixin.py @@ -5,11 +5,13 @@ # the root directory of this source tree. 
import logging +import mimetypes import time import uuid from abc import ABC, abstractmethod from typing import Any +from llama_stack.apis.files import Files from llama_stack.apis.vector_dbs import VectorDB from llama_stack.apis.vector_io import ( QueryChunksResponse, @@ -20,6 +22,15 @@ from llama_stack.apis.vector_io import ( VectorStoreSearchResponse, VectorStoreSearchResponsePage, ) +from llama_stack.apis.vector_io.vector_io import ( + Chunk, + VectorStoreChunkingStrategy, + VectorStoreChunkingStrategyAuto, + VectorStoreChunkingStrategyStatic, + VectorStoreFileLastError, + VectorStoreFileObject, +) +from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, make_overlapped_chunks logger = logging.getLogger(__name__) @@ -36,6 +47,7 @@ class OpenAIVectorStoreMixin(ABC): # These should be provided by the implementing class openai_vector_stores: dict[str, dict[str, Any]] + files_api: Files | None @abstractmethod async def _save_openai_vector_store(self, store_id: str, store_info: dict[str, Any]) -> None: @@ -67,6 +79,16 @@ class OpenAIVectorStoreMixin(ABC): """Unregister a vector database (provider-specific implementation).""" pass + @abstractmethod + async def insert_chunks( + self, + vector_db_id: str, + chunks: list[Chunk], + ttl_seconds: int | None = None, + ) -> None: + """Insert chunks into a vector database (provider-specific implementation).""" + pass + @abstractmethod async def query_chunks( self, vector_db_id: str, query: Any, params: dict[str, Any] | None = None @@ -383,3 +405,78 @@ class OpenAIVectorStoreMixin(ABC): if metadata[key] != value: return False return True + + async def openai_attach_file_to_vector_store( + self, + vector_store_id: str, + file_id: str, + attributes: dict[str, Any] | None = None, + chunking_strategy: VectorStoreChunkingStrategy | None = None, + ) -> VectorStoreFileObject: + attributes = attributes or {} + chunking_strategy = chunking_strategy or VectorStoreChunkingStrategyAuto() + + vector_store_file_object = VectorStoreFileObject( + id=file_id, + attributes=attributes, + chunking_strategy=chunking_strategy, + created_at=int(time.time()), + status="in_progress", + vector_store_id=vector_store_id, + ) + + if not hasattr(self, "files_api") or not self.files_api: + vector_store_file_object.status = "failed" + vector_store_file_object.last_error = VectorStoreFileLastError( + code="server_error", + message="Files API is not available", + ) + return vector_store_file_object + + if isinstance(chunking_strategy, VectorStoreChunkingStrategyStatic): + max_chunk_size_tokens = chunking_strategy.static.max_chunk_size_tokens + chunk_overlap_tokens = chunking_strategy.static.chunk_overlap_tokens + else: + # Default values from OpenAI API spec + max_chunk_size_tokens = 800 + chunk_overlap_tokens = 400 + + try: + file_response = await self.files_api.openai_retrieve_file(file_id) + mime_type, _ = mimetypes.guess_type(file_response.filename) + content_response = await self.files_api.openai_retrieve_file_content(file_id) + + content = content_from_data_and_mime_type(content_response.body, mime_type) + + chunks = make_overlapped_chunks( + file_id, + content, + max_chunk_size_tokens, + chunk_overlap_tokens, + attributes, + ) + + if not chunks: + vector_store_file_object.status = "failed" + vector_store_file_object.last_error = VectorStoreFileLastError( + code="server_error", + message="No chunks were generated from the file", + ) + return vector_store_file_object + + await self.insert_chunks( + vector_db_id=vector_store_id, + chunks=chunks, 
+ ) + except Exception as e: + logger.error(f"Error attaching file to vector store: {e}") + vector_store_file_object.status = "failed" + vector_store_file_object.last_error = VectorStoreFileLastError( + code="server_error", + message=str(e), + ) + return vector_store_file_object + + vector_store_file_object.status = "completed" + + return vector_store_file_object diff --git a/llama_stack/providers/utils/memory/vector_store.py b/llama_stack/providers/utils/memory/vector_store.py index 4cd15860b..2c0c7c8e9 100644 --- a/llama_stack/providers/utils/memory/vector_store.py +++ b/llama_stack/providers/utils/memory/vector_store.py @@ -72,16 +72,18 @@ def content_from_data(data_url: str) -> str: data = unquote(data) encoding = parts["encoding"] or "utf-8" data = data.encode(encoding) + return content_from_data_and_mime_type(data, parts["mimetype"], parts.get("encoding", None)) - encoding = parts["encoding"] - if not encoding: - import chardet - detected = chardet.detect(data) - encoding = detected["encoding"] +def content_from_data_and_mime_type(data: bytes | str, mime_type: str | None, encoding: str | None = None) -> str: + if isinstance(data, bytes): + if not encoding: + import chardet - mime_type = parts["mimetype"] - mime_category = mime_type.split("/")[0] + detected = chardet.detect(data) + encoding = detected["encoding"] + + mime_category = mime_type.split("/")[0] if mime_type else None if mime_category == "text": # For text-based files (including CSV, MD) return data.decode(encoding) diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml index 36a120897..ebe0849f3 100644 --- a/llama_stack/templates/ollama/build.yaml +++ b/llama_stack/templates/ollama/build.yaml @@ -23,6 +23,8 @@ distribution_spec: - inline::basic - inline::llm-as-judge - inline::braintrust + files: + - inline::localfs post_training: - inline::huggingface tool_runtime: diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py index 0b4f05128..46c4852a4 100644 --- a/llama_stack/templates/ollama/ollama.py +++ b/llama_stack/templates/ollama/ollama.py @@ -13,6 +13,7 @@ from llama_stack.distribution.datatypes import ( ShieldInput, ToolGroupInput, ) +from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig from llama_stack.providers.inline.post_training.huggingface import HuggingFacePostTrainingConfig from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig from llama_stack.providers.remote.inference.ollama import OllamaImplConfig @@ -29,6 +30,7 @@ def get_distribution_template() -> DistributionTemplate: "eval": ["inline::meta-reference"], "datasetio": ["remote::huggingface", "inline::localfs"], "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"], + "files": ["inline::localfs"], "post_training": ["inline::huggingface"], "tool_runtime": [ "remote::brave-search", @@ -49,6 +51,11 @@ def get_distribution_template() -> DistributionTemplate: provider_type="inline::faiss", config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"), ) + files_provider = Provider( + provider_id="meta-reference-files", + provider_type="inline::localfs", + config=LocalfsFilesImplConfig.sample_run_config(f"~/.llama/distributions/{name}"), + ) posttraining_provider = Provider( provider_id="huggingface", provider_type="inline::huggingface", @@ -98,6 +105,7 @@ def get_distribution_template() -> DistributionTemplate: provider_overrides={ "inference": [inference_provider], "vector_io": 
[vector_io_provider_faiss], + "files": [files_provider], "post_training": [posttraining_provider], }, default_models=[inference_model, embedding_model], @@ -107,6 +115,7 @@ def get_distribution_template() -> DistributionTemplate: provider_overrides={ "inference": [inference_provider], "vector_io": [vector_io_provider_faiss], + "files": [files_provider], "post_training": [posttraining_provider], "safety": [ Provider( diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml index 7bf9fc3bd..85d5c813b 100644 --- a/llama_stack/templates/ollama/run-with-safety.yaml +++ b/llama_stack/templates/ollama/run-with-safety.yaml @@ -4,6 +4,7 @@ apis: - agents - datasetio - eval +- files - inference - post_training - safety @@ -84,6 +85,14 @@ providers: provider_type: inline::braintrust config: openai_api_key: ${env.OPENAI_API_KEY:} + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ${env.FILES_STORAGE_DIR:~/.llama/distributions/ollama/files} + metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/files_metadata.db post_training: - provider_id: huggingface provider_type: inline::huggingface diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml index 0030bcd60..2d10a99a4 100644 --- a/llama_stack/templates/ollama/run.yaml +++ b/llama_stack/templates/ollama/run.yaml @@ -4,6 +4,7 @@ apis: - agents - datasetio - eval +- files - inference - post_training - safety @@ -82,6 +83,14 @@ providers: provider_type: inline::braintrust config: openai_api_key: ${env.OPENAI_API_KEY:} + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ${env.FILES_STORAGE_DIR:~/.llama/distributions/ollama/files} + metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/files_metadata.db post_training: - provider_id: huggingface provider_type: inline::huggingface diff --git a/llama_stack/templates/starter/build.yaml b/llama_stack/templates/starter/build.yaml index 5fd3cc3f5..9bf4913a7 100644 --- a/llama_stack/templates/starter/build.yaml +++ b/llama_stack/templates/starter/build.yaml @@ -17,6 +17,8 @@ distribution_spec: - inline::sqlite-vec - remote::chromadb - remote::pgvector + files: + - inline::localfs safety: - inline::llama-guard agents: diff --git a/llama_stack/templates/starter/run.yaml b/llama_stack/templates/starter/run.yaml index 4732afa77..319ababe5 100644 --- a/llama_stack/templates/starter/run.yaml +++ b/llama_stack/templates/starter/run.yaml @@ -4,6 +4,7 @@ apis: - agents - datasetio - eval +- files - inference - safety - scoring @@ -75,6 +76,14 @@ providers: db: ${env.PGVECTOR_DB:} user: ${env.PGVECTOR_USER:} password: ${env.PGVECTOR_PASSWORD:} + files: + - provider_id: meta-reference-files + provider_type: inline::localfs + config: + storage_dir: ${env.FILES_STORAGE_DIR:~/.llama/distributions/starter/files} + metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/files_metadata.db safety: - provider_id: llama-guard provider_type: inline::llama-guard diff --git a/llama_stack/templates/starter/starter.py b/llama_stack/templates/starter/starter.py index 650ecc87f..2a44a0a37 100644 --- a/llama_stack/templates/starter/starter.py +++ b/llama_stack/templates/starter/starter.py @@ -12,6 +12,7 @@ from llama_stack.distribution.datatypes import ( ShieldInput, ToolGroupInput, ) +from 
llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) @@ -134,6 +135,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ([p.provider_type for p in inference_providers] + ["inline::sentence-transformers"]), "vector_io": ["inline::sqlite-vec", "remote::chromadb", "remote::pgvector"], + "files": ["inline::localfs"], "safety": ["inline::llama-guard"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], @@ -170,6 +172,11 @@ def get_distribution_template() -> DistributionTemplate: ), ), ] + files_provider = Provider( + provider_id="meta-reference-files", + provider_type="inline::localfs", + config=LocalfsFilesImplConfig.sample_run_config(f"~/.llama/distributions/{name}"), + ) embedding_provider = Provider( provider_id="sentence-transformers", provider_type="inline::sentence-transformers", @@ -212,6 +219,7 @@ def get_distribution_template() -> DistributionTemplate: provider_overrides={ "inference": inference_providers + [embedding_provider], "vector_io": vector_io_providers, + "files": [files_provider], }, default_models=default_models + [embedding_model], default_tool_groups=default_tool_groups, diff --git a/tests/verifications/openai_api/fixtures/pdfs/llama_stack_and_models.pdf b/tests/verifications/openai_api/fixtures/pdfs/llama_stack_and_models.pdf new file mode 100644 index 0000000000000000000000000000000000000000..25579f42582739d5a2462fa8075e8b1842bb9181 GIT binary patch literal 37844 zcmb4q1yo&2(k|}72@VH$cMt9o+}+(>f)iYVYk=Tx!QI^@kl^m_@J^CD|Ky)}cjmpd zSZkl%yLa_h)%8^ur|OU?h=|cK(X+yl%^dEp!!ZFE0d|I#aJ;+>$`;PnCICtqYXcht zfReL;krlwe))?^nUe?ao#M+4pjzPiE&e+As#1TNLWMyH%0F(x>VPfF|h}+qj0bduf zGjakbn%EkfIGQ+8@$WK3+$oXr93EL@!Y`~W9sM-u}ZIQNWWo#$x0CWlu&`DzAzg2M>H zPQgw|x0*M-AJ7gV5eC?iNd-4J-w_D(EycMuw^)W4D|t1G6!v_#G)acK^@a1NKWA!;L>*Ybu?%OToG@6K{ zyx-$xdPnR1z;~EW-#WO(TD|j)cwf($``tSH3r{;7J}te(f{F77G`3g zFT+_)zV|&ZbhM%iJHGPTp#nM#{Ms7~-?UWLsuxw$x9#FkfvHlkqg;nhx9$gpt8HrS z-4ouBJnaGRjtPIrqBtwEsSREat1Eza`IiGeK@>9|FNF&ibc<}io7^N(Yq0c6_=Iv= zt+HVKB}m-}VrAlCP(^2x3=xHNzv)NR?Z;wHfK*A!t@RvYGHJo)Ix~zLJ}vbG@8yRH zHJuqKGD=D6kv$Lz&}-H(JQ)88CwW1sF{X2PqLP-9jf+6-2~rVbp#$urwgHzHE#DMk zFfb?yt{Qpe&9VT3`~1t~r@N|`mrISlV+|Gl7Kwz`bp17Lc;g3eWtVhEa#dz)IV)HI z2-B*C@w5Bku|=h%RODHQ zCTfM45K$rtSt=6owM~hXbCgV}RCKNpuGCn!L9=(7RI@H6pie7tAca2SG5+zUZe=Ai zJ#KF&pq;C$`UgSM^Yh}@Cv6p)zOX|WO-dLAx)_*nL>F7kh#!Wh(w8KDaD|_j)wxpWg@*X5r&7yRZTv@+%y@Rr zpa=RT%b#2G`mRmE52D8IeWfZtmgtCYxKKo>#3j)hXVMyrD|I&wm1daqHp_JwB-Uy# zwR-D_k?-EM=v^J(@4&~PEP1&#H1%REW+AjlUA*U>&y_(o$dri>^OTvI1*|L=bMmMX zS~*(6@c*cc5?Zh7-EmUmZB&BXb&^Nune~-aQSw(@cX?EqQx5w=Dvi+UMlRq+L2433 zoI>}Jh2uV7v6|Q{0#9d{>|4o9NbyRoF8;xty!$B27kN@iBEdKpHEXG19ky_nzUJZi zXEqPjoxM>hSmo+6BmCz`Wkr+xoiIMBK>k{zS%I}>hl-NZ@03>M8dS1P8_F@psas?S z*l@LY(0GMu;(n_NS7K=_*Li!T=`1c7E>|arDvp-)+2FYCN#*kI>72gj)mW?Kd+O!~ zc;lVZ+z1JoZlgu^>yC0erqykj$9C~GY_=nx`ro=~_`M@-Lhb+ZnxPmNV7Zw`o4_|L zDG2v9cFgykp7`(%(&I@)bo<{H z7DR@PJ|u^-Jch3NX5rk*$R-AeI|Vj3b-bPt`aQY%^db2@Jo>$!>j}Kv`@OteJ%?uV zdv{`~pm;vs6t~s9`g~c+O8!tIkEXLgLa>Mwk#sD%(CB2nfNbje1mV;EqGj;SjJc6p zEPHg3foU!l*_y+taB&eSRZ<>AM|r|`mt=~oa?2N0MHFsJ0)7D!1uPLK54#d;!5Lp? 
diff --git a/tests/verifications/openai_api/fixtures/test_cases/responses.yaml b/tests/verifications/openai_api/fixtures/test_cases/responses.yaml
index 4d6c19b59..1acf06388 100644
--- a/tests/verifications/openai_api/fixtures/test_cases/responses.yaml
+++ b/tests/verifications/openai_api/fixtures/test_cases/responses.yaml
@@ -31,6 +31,25 @@ test_response_web_search:
             search_context_size: "low"
         output: "128"
 
+test_response_file_search:
+  test_name: test_response_file_search
+  test_params:
+    case:
+      - case_id: "llama_experts"
+        input: "How many experts does the Llama 4 Maverick model have?"
+        tools:
+          - type: file_search
+            # vector_store_ids param for file_search tool gets added by the test runner
+        file_content: "Llama 4 Maverick has 128 experts"
+        output: "128"
+      - case_id: "llama_experts_pdf"
+        input: "How many experts does the Llama 4 Maverick model have?"
+        tools:
+          - type: file_search
+            # vector_store_ids param for file_search tool gets added by the test runner
+        file_path: "pdfs/llama_stack_and_models.pdf"
+        output: "128"
+
 test_response_mcp_tool:
   test_name: test_response_mcp_tool
   test_params:
diff --git a/tests/verifications/openai_api/test_responses.py b/tests/verifications/openai_api/test_responses.py
index 28020d3b1..1c9cdaa3a 100644
--- a/tests/verifications/openai_api/test_responses.py
+++ b/tests/verifications/openai_api/test_responses.py
@@ -5,6 +5,8 @@
 # the root directory of this source tree.
 
 import json
+import os
+import time
 
 import httpx
 import openai
@@ -23,6 +25,31 @@ from tests.verifications.openai_api.fixtures.load import load_test_cases
 
 responses_test_cases = load_test_cases("responses")
 
+
+def _new_vector_store(openai_client, name):
+    # Ensure we don't reuse an existing vector store
+    vector_stores = openai_client.vector_stores.list()
+    for vector_store in vector_stores:
+        if vector_store.name == name:
+            openai_client.vector_stores.delete(vector_store_id=vector_store.id)
+
+    # Create a new vector store
+    vector_store = openai_client.vector_stores.create(
+        name=name,
+    )
+    return vector_store
+
+
+def _upload_file(openai_client, name, file_path):
+    # Ensure we don't reuse an existing file
+    files = openai_client.files.list()
+    for file in files:
+        if file.filename == name:
+            openai_client.files.delete(file_id=file.id)
+
+    # Upload a file with our document content
+    return openai_client.files.create(file=open(file_path, "rb"), purpose="assistants")
+
+
 @pytest.mark.parametrize(
     "case",
     responses_test_cases["test_response_basic"]["test_params"]["case"],
@@ -258,6 +285,111 @@ def test_response_non_streaming_web_search(request, openai_client, model, provid
     assert case["output"].lower() in response.output_text.lower().strip()
 
 
+@pytest.mark.parametrize(
+    "case",
+    responses_test_cases["test_response_file_search"]["test_params"]["case"],
+    ids=case_id_generator,
+)
+def test_response_non_streaming_file_search(
+    request, openai_client, model, provider, verification_config, tmp_path, case
+):
+    if isinstance(openai_client, LlamaStackAsLibraryClient):
+        pytest.skip("Responses API file search is not yet supported in library client.")
+
+    test_name_base = get_base_test_name(request)
+    if should_skip_test(verification_config, provider, model, test_name_base):
+        pytest.skip(f"Skipping {test_name_base} for model {model} on provider {provider} based on config.")
+
+    vector_store = _new_vector_store(openai_client, "test_vector_store")
+
+    if "file_content" in case:
+        file_name = "test_response_non_streaming_file_search.txt"
+        file_path = tmp_path / file_name
+        file_path.write_text(case["file_content"])
+    elif "file_path" in case:
+        file_path = os.path.join(os.path.dirname(__file__), "fixtures", case["file_path"])
+        file_name = os.path.basename(file_path)
+    else:
+        raise ValueError(f"No file content or path provided for case {case['case_id']}")
+
+    file_response = _upload_file(openai_client, file_name, file_path)
+
+    # Attach our file to the vector store
+    file_attach_response = openai_client.vector_stores.files.create(
+        vector_store_id=vector_store.id,
+        file_id=file_response.id,
+    )
+
+    # Wait for the file to be attached
+    while file_attach_response.status == "in_progress":
+        time.sleep(0.1)
+        file_attach_response = openai_client.vector_stores.files.retrieve(
+            vector_store_id=vector_store.id,
+            file_id=file_response.id,
+        )
+    assert file_attach_response.status == "completed", f"Expected file to be attached, got {file_attach_response}"
+    assert not file_attach_response.last_error
+
+    # Update our tools with the right vector store id
+    tools = case["tools"]
+    for tool in tools:
+        if tool["type"] == "file_search":
+            tool["vector_store_ids"] = [vector_store.id]
+
+    # Create the response request, which should query our vector store
+    response = openai_client.responses.create(
+        model=model,
+        input=case["input"],
+        tools=tools,
+        stream=False,
+        include=["file_search_call.results"],
+    )
+
+    # Verify the file_search_tool was called
+    assert len(response.output) > 1
+    assert response.output[0].type == "file_search_call"
+    assert response.output[0].status == "completed"
+    assert response.output[0].queries  # ensure it's some non-empty list
+    assert response.output[0].results
+    assert case["output"].lower() in response.output[0].results[0].text.lower()
+    assert response.output[0].results[0].score > 0
+
+    # Verify the output_text generated by the response
+    assert case["output"].lower() in response.output_text.lower().strip()
+
+
+def test_response_non_streaming_file_search_empty_vector_store(
+    request, openai_client, model, provider, verification_config
+):
+    if isinstance(openai_client, LlamaStackAsLibraryClient):
+        pytest.skip("Responses API file search is not yet supported in library client.")
+
+    test_name_base = get_base_test_name(request)
+    if should_skip_test(verification_config, provider, model, test_name_base):
+        pytest.skip(f"Skipping {test_name_base} for model {model} on provider {provider} based on config.")
+
+    vector_store = _new_vector_store(openai_client, "test_vector_store")
+
+    # Create the response request, which should query our vector store
+    response = openai_client.responses.create(
+        model=model,
+        input="How many experts does the Llama 4 Maverick model have?",
+        tools=[{"type": "file_search", "vector_store_ids": [vector_store.id]}],
+        stream=False,
+        include=["file_search_call.results"],
+    )
+
+    # Verify the file_search_tool was called
+    assert len(response.output) > 1
+    assert response.output[0].type == "file_search_call"
+    assert response.output[0].status == "completed"
+    assert response.output[0].queries  # ensure it's some non-empty list
+    assert not response.output[0].results  # ensure we don't get any results
+
+    # Verify some output_text was generated by the response
+    assert response.output_text
+
+
 @pytest.mark.parametrize(
     "case",
     responses_test_cases["test_response_mcp_tool"]["test_params"]["case"],