diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8fff470f6..75636525e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,4 +2,4 @@
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
-* @ashwinb @yanxi0830 @hardikjshah @raghotham @ehhuang @leseb @bbrowning @reluctantfuturist @mattf @slekkala1 @franciscojavierarceo
+* @ashwinb @raghotham @ehhuang @leseb @bbrowning @mattf @franciscojavierarceo @cdoern
diff --git a/.github/actions/setup-typescript-client/action.yml b/.github/actions/setup-typescript-client/action.yml
new file mode 100644
index 000000000..8b78ba70c
--- /dev/null
+++ b/.github/actions/setup-typescript-client/action.yml
@@ -0,0 +1,35 @@
+name: Setup TypeScript client
+description: Conditionally checkout and link llama-stack-client-typescript based on client-version
+inputs:
+  client-version:
+    description: 'Client version (latest or published)'
+    required: true
+
+outputs:
+  ts-client-path:
+    description: 'Path or version to use for TypeScript client'
+    value: ${{ steps.set-path.outputs.ts-client-path }}
+
+runs:
+  using: "composite"
+  steps:
+    - name: Checkout TypeScript client (latest)
+      if: ${{ inputs.client-version == 'latest' }}
+      uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+      with:
+        repository: llamastack/llama-stack-client-typescript
+        ref: main
+        path: .ts-client-checkout
+
+    - name: Set TS_CLIENT_PATH
+      id: set-path
+      shell: bash
+      run: |
+        if [ "${{ inputs.client-version }}" = "latest" ]; then
+          # Quote $GITHUB_OUTPUT: the workspace path may contain spaces on self-hosted runners.
+          echo "ts-client-path=${{ github.workspace }}/.ts-client-checkout" >> "$GITHUB_OUTPUT"
+        elif [ "${{ inputs.client-version }}" = "published" ]; then
+          echo "ts-client-path=^0.3.2" >> "$GITHUB_OUTPUT"
+        else
+          echo "::error::Invalid client-version: ${{ inputs.client-version }}"
+          exit 1
+        fi
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index 71c7933b4..721c3b5a0 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -93,11 +93,27 @@ jobs:
suite: ${{ matrix.config.suite }}
inference-mode: 'replay'
+ - name: Setup Node.js for TypeScript client tests
+ if: ${{ matrix.client == 'server' }}
+ uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+ with:
+ node-version: '20'
+ cache: 'npm'
+ cache-dependency-path: tests/integration/client-typescript/package-lock.json
+
+ - name: Setup TypeScript client
+ if: ${{ matrix.client == 'server' }}
+ id: setup-ts-client
+ uses: ./.github/actions/setup-typescript-client
+ with:
+ client-version: ${{ matrix.client-version }}
+
- name: Run tests
if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
uses: ./.github/actions/run-and-record-tests
env:
OPENAI_API_KEY: dummy
+ TS_CLIENT_PATH: ${{ steps.setup-ts-client.outputs.ts-client-path || '' }}
with:
stack-config: >-
${{ matrix.config.stack_config
diff --git a/.github/workflows/stainless-builds.yml b/.github/workflows/stainless-builds.yml
index 00c5e3df5..28869fdd8 100644
--- a/.github/workflows/stainless-builds.yml
+++ b/.github/workflows/stainless-builds.yml
@@ -43,7 +43,41 @@ env:
# Stainless organization dashboard
jobs:
+ compute-branch:
+ runs-on: ubuntu-latest
+ outputs:
+ preview_branch: ${{ steps.compute.outputs.preview_branch }}
+ base_branch: ${{ steps.compute.outputs.base_branch }}
+ merge_branch: ${{ steps.compute.outputs.merge_branch }}
+ steps:
+ - name: Compute branch names
+ id: compute
+ run: |
+ HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}"
+ BASE_REPO="${{ github.repository }}"
+ BRANCH_NAME="${{ github.event.pull_request.head.ref }}"
+ FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}"
+
+ if [ "$HEAD_REPO" != "$BASE_REPO" ]; then
+ # Fork PR: prefix with fork owner for isolation
+ if [ -z "$FORK_OWNER" ]; then
+ echo "Error: Fork PR detected but fork owner is empty" >&2
+ exit 1
+ fi
+ PREVIEW_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}"
+ BASE_BRANCH="preview/base/${FORK_OWNER}/${BRANCH_NAME}"
+ else
+ # Same-repo PR
+ PREVIEW_BRANCH="preview/${BRANCH_NAME}"
+ BASE_BRANCH="preview/base/${BRANCH_NAME}"
+ fi
+
+ echo "preview_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT
+ echo "base_branch=${BASE_BRANCH}" >> $GITHUB_OUTPUT
+ echo "merge_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT
+
preview:
+ needs: compute-branch
if: github.event.action != 'closed'
runs-on: ubuntu-latest
permissions:
@@ -59,8 +93,6 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 2
- # This action builds preview SDKs from the OpenAPI spec changes and
- # posts/updates a comment on the PR with build results and links to the preview.
- name: Run preview builds
uses: stainless-api/upload-openapi-spec-action/preview@32823b096b4319c53ee948d702d9052873af485f # 1.6.0
with:
@@ -73,8 +105,11 @@ jobs:
base_sha: ${{ github.event.pull_request.base.sha }}
base_ref: ${{ github.event.pull_request.base.ref }}
head_sha: ${{ github.event.pull_request.head.sha }}
+ branch: ${{ needs.compute-branch.outputs.preview_branch }}
+ base_branch: ${{ needs.compute-branch.outputs.base_branch }}
merge:
+ needs: compute-branch
if: github.event.action == 'closed' && github.event.pull_request.merged == true
runs-on: ubuntu-latest
permissions:
@@ -91,11 +126,11 @@ jobs:
fetch-depth: 2
# Note that this only merges in changes that happened on the last build on
- # preview/${{ github.head_ref }}. It's possible that there are OAS/config
- # changes that haven't been built, if the preview-sdk job didn't finish
+ # the computed preview branch. It's possible that there are OAS/config
+ # changes that haven't been built, if the preview job didn't finish
# before this step starts. In theory we want to wait for all builds
- # against preview/${{ github.head_ref }} to complete, but assuming that
- # the preview-sdk job happens before the PR merge, it should be fine.
+ # against the preview branch to complete, but assuming that
+ # the preview job happens before the PR merge, it should be fine.
- name: Run merge build
uses: stainless-api/upload-openapi-spec-action/merge@32823b096b4319c53ee948d702d9052873af485f # 1.6.0
with:
@@ -108,3 +143,4 @@ jobs:
base_sha: ${{ github.event.pull_request.base.sha }}
base_ref: ${{ github.event.pull_request.base.ref }}
head_sha: ${{ github.event.pull_request.head.sha }}
+ merge_branch: ${{ needs.compute-branch.outputs.merge_branch }}
diff --git a/.gitignore b/.gitignore
index f5ca450b2..0d8fd5a2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,3 +35,5 @@ docs/static/imported-files/
docs/docs/api-deprecated/
docs/docs/api-experimental/
docs/docs/api/
+tests/integration/client-typescript/node_modules/
+.ts-client-checkout/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c31a39406..f94356fe5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -199,6 +199,27 @@ repos:
echo;
exit 1;
} || true
+      - id: check-api-independence
+        name: Ensure llama_stack_api does not import llama_stack
+        entry: bash
+        language: system
+        pass_filenames: false
+        require_serial: true
+        always_run: true
+        files: ^src/llama_stack_api/.*$
+        args:
+          - -c
+          - |
+            API_DIR="src/llama_stack_api"
+            grep -rn --include="*.py" -E '^[^#]*(import|from)[[:space:]]+llama_stack([^_[:alnum:]]|$)' "$API_DIR" 2>/dev/null && {
+              echo "llama_stack_api must not import llama_stack";
+              exit 1;
+            }
+            [ -f "$API_DIR/pyproject.toml" ] && grep -nE '"llama[-_]stack([^-_[:alnum:]]|$)' "$API_DIR/pyproject.toml" && {
+              echo "llama_stack_api must not depend on llama_stack in pyproject.toml";
+              exit 1;
+            }
+            exit 0
ci:
   autofix_commit_msg: 🚨 [pre-commit.ci] Auto format from pre-commit.com hooks
diff --git a/README.md b/README.md
index 639e7280d..5360f4ff0 100644
--- a/README.md
+++ b/README.md
@@ -10,83 +10,6 @@
[**Quick Start**](https://llamastack.github.io/docs/getting_started/quickstart) | [**Documentation**](https://llamastack.github.io/docs) | [**Colab Notebook**](./docs/getting_started.ipynb) | [**Discord**](https://discord.gg/llama-stack)
-### āØš Llama 4 Support šāØ
-We released [Version 0.2.0](https://github.com/meta-llama/llama-stack/releases/tag/v0.2.0) with support for the Llama 4 herd of models released by Meta.
-
-
-
-š Click here to see how to run Llama 4 models on Llama Stack
-
-\
-*Note you need 8xH100 GPU-host to run these models*
-
-```bash
-pip install -U llama_stack
-
-MODEL="Llama-4-Scout-17B-16E-Instruct"
-# get meta url from llama.com
-huggingface-cli download meta-llama/$MODEL --local-dir ~/.llama/$MODEL
-
-# install dependencies for the distribution
-llama stack list-deps meta-reference-gpu | xargs -L1 uv pip install
-
-# start a llama stack server
-INFERENCE_MODEL=meta-llama/$MODEL llama stack run meta-reference-gpu
-
-# install client to interact with the server
-pip install llama-stack-client
-```
-### CLI
-```bash
-# Run a chat completion
-MODEL="Llama-4-Scout-17B-16E-Instruct"
-
-llama-stack-client --endpoint http://localhost:8321 \
-inference chat-completion \
---model-id meta-llama/$MODEL \
---message "write a haiku for meta's llama 4 models"
-
-OpenAIChatCompletion(
- ...
- choices=[
- OpenAIChatCompletionChoice(
- finish_reason='stop',
- index=0,
- message=OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam(
- role='assistant',
- content='...**Silent minds awaken,** \n**Whispers of billions of words,** \n**Reasoning breaks the night.** \n\nā \n*This haiku blends the essence of LLaMA 4\'s capabilities with nature-inspired metaphor, evoking its vast training data and transformative potential.*',
- ...
- ),
- ...
- )
- ],
- ...
-)
-```
-### Python SDK
-```python
-from llama_stack_client import LlamaStackClient
-
-client = LlamaStackClient(base_url=f"http://localhost:8321")
-
-model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
-prompt = "Write a haiku about coding"
-
-print(f"User> {prompt}")
-response = client.chat.completions.create(
- model=model_id,
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": prompt},
- ],
-)
-print(f"Assistant> {response.choices[0].message.content}")
-```
-As more providers start supporting Llama 4, you can use them in Llama Stack as well. We are adding to the list. Stay tuned!
-
-
-
-
### š One-Line Installer š
To try Llama Stack locally, run:
diff --git a/client-sdks/stainless/README.md b/client-sdks/stainless/README.md
index 73e7082d4..54ff3d3d1 100644
--- a/client-sdks/stainless/README.md
+++ b/client-sdks/stainless/README.md
@@ -5,4 +5,7 @@ These are the source-of-truth configuration files used to generate the Stainless
A small side note: notice the `.yml` suffixes since Stainless uses that suffix typically for its configuration files.
-These files go hand-in-hand. As of now, only the `openapi.yml` file is automatically generated using the `scripts/run_openapi_generator.sh` script.
+These files go hand-in-hand. Both `openapi.yml` and `config.yml` are generated by `scripts/run_openapi_generator.sh`:
+
+- `openapi.yml` comes from the FastAPI-based generator.
+- `config.yml` is rendered from `scripts/openapi_generator/stainless_config/config_data.py` so the Stainless config stays in lock-step with the spec.
diff --git a/client-sdks/stainless/config.yml b/client-sdks/stainless/config.yml
index 9b26114fe..212b2b54a 100644
--- a/client-sdks/stainless/config.yml
+++ b/client-sdks/stainless/config.yml
@@ -1,20 +1,16 @@
# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json
organization:
- # Name of your organization or company, used to determine the name of the client
- # and headings.
name: llama-stack-client
docs: https://llama-stack.readthedocs.io/en/latest/
contact: llamastack@meta.com
security:
- - {}
- - BearerAuth: []
+- {}
+- BearerAuth: []
security_schemes:
BearerAuth:
type: http
scheme: bearer
-# `targets` define the output targets and their customization options, such as
-# whether to emit the Node SDK and what it's package name should be.
targets:
node:
package_name: llama-stack-client
@@ -40,71 +36,123 @@ targets:
options:
enable_v2: true
back_compat_use_shared_package: false
-
-# `client_settings` define settings for the API client, such as extra constructor
-# arguments (used for authentication), retry behavior, idempotency, etc.
client_settings:
default_env_prefix: LLAMA_STACK_CLIENT
opts:
api_key:
type: string
read_env: LLAMA_STACK_CLIENT_API_KEY
- auth: { security_scheme: BearerAuth }
+ auth:
+ security_scheme: BearerAuth
nullable: true
-
-# `environments` are a map of the name of the environment (e.g. "sandbox",
-# "production") to the corresponding url to use.
environments:
production: http://any-hosted-llama-stack.com
-
-# `pagination` defines [pagination schemes] which provides a template to match
-# endpoints and generate next-page and auto-pagination helpers in the SDKs.
pagination:
- - name: datasets_iterrows
- type: offset
- request:
- dataset_id:
- type: string
- start_index:
- type: integer
- x-stainless-pagination-property:
- purpose: offset_count_param
- limit:
- type: integer
- response:
- data:
- type: array
- items:
+- name: datasets_iterrows
+ type: offset
+ request:
+ dataset_id:
+ type: string
+ start_index:
+ type: integer
+ x-stainless-pagination-property:
+ purpose: offset_count_param
+ limit:
+ type: integer
+ response:
+ data:
+ type: array
+ items:
+ type: object
+ next_index:
+ type: integer
+ x-stainless-pagination-property:
+ purpose: offset_count_start_field
+- name: openai_cursor_page
+ type: cursor
+ request:
+ limit:
+ type: integer
+ after:
+ type: string
+ x-stainless-pagination-property:
+ purpose: next_cursor_param
+ response:
+ data:
+ type: array
+ items: {}
+ has_more:
+ type: boolean
+ last_id:
+ type: string
+ x-stainless-pagination-property:
+ purpose: next_cursor_field
+settings:
+ license: MIT
+ unwrap_response_fields:
+ - data
+ file_header: 'Copyright (c) Meta Platforms, Inc. and affiliates.
+
+ All rights reserved.
+
+
+ This source code is licensed under the terms described in the LICENSE file in
+
+ the root directory of this source tree.
+
+ '
+openapi:
+ transformations:
+ - command: mergeObject
+ reason: Better return_type using enum
+ args:
+ target:
+ - $.components.schemas
+ object:
+ ReturnType:
+ additionalProperties: false
+ properties:
+ type:
+ enum:
+ - string
+ - number
+ - boolean
+ - array
+ - object
+ - json
+ - union
+ - chat_completion_input
+ - completion_input
+ - agent_turn_input
+ required:
+ - type
type: object
- next_index:
- type: integer
- x-stainless-pagination-property:
- purpose: offset_count_start_field
- - name: openai_cursor_page
- type: cursor
- request:
- limit:
- type: integer
- after:
- type: string
- x-stainless-pagination-property:
- purpose: next_cursor_param
- response:
- data:
- type: array
- items: {}
- has_more:
- type: boolean
- last_id:
- type: string
- x-stainless-pagination-property:
- purpose: next_cursor_field
-# `resources` define the structure and organziation for your API, such as how
-# methods and models are grouped together and accessed. See the [configuration
-# guide] for more information.
-#
-# [configuration guide]:
-# https://app.stainlessapi.com/docs/guides/configure#resources
+ - command: replaceProperties
+ reason: Replace return type properties with better model (see above)
+ args:
+ filter:
+ only:
+ - $.components.schemas.ScoringFn.properties.return_type
+ - $.components.schemas.RegisterScoringFunctionRequest.properties.return_type
+ value:
+ $ref: '#/components/schemas/ReturnType'
+ - command: oneOfToAnyOf
+ reason: Prism (mock server) doesn't like one of our requests as it technically
+ matches multiple variants
+readme:
+ example_requests:
+ default:
+ type: request
+ endpoint: post /v1/chat/completions
+ params: {}
+ headline:
+ type: request
+ endpoint: get /v1/models
+ params: {}
+ pagination:
+ type: request
+ endpoint: post /v1/chat/completions
+ params: {}
resources:
$shared:
models:
@@ -128,19 +176,17 @@ resources:
methods:
get: get /v1/tools/{tool_name}
list:
- endpoint: get /v1/tools
paginated: false
-
+ endpoint: get /v1/tools
tool_runtime:
models:
tool_def: ToolDef
tool_invocation_result: ToolInvocationResult
methods:
list_tools:
- endpoint: get /v1/tool-runtime/list-tools
paginated: false
+ endpoint: get /v1/tool-runtime/list-tools
invoke_tool: post /v1/tool-runtime/invoke
-
responses:
models:
response_object_stream: OpenAIResponseObjectStream
@@ -148,10 +194,10 @@ resources:
methods:
create:
type: http
- endpoint: post /v1/responses
streaming:
stream_event_model: responses.response_object_stream
param_discriminator: stream
+ endpoint: post /v1/responses
retrieve: get /v1/responses/{response_id}
list:
type: http
@@ -164,9 +210,8 @@ resources:
methods:
list:
type: http
- endpoint: get /v1/responses/{response_id}/input_items
paginated: false
-
+ endpoint: get /v1/responses/{response_id}/input_items
prompts:
models:
prompt: Prompt
@@ -174,8 +219,8 @@ resources:
methods:
create: post /v1/prompts
list:
- endpoint: get /v1/prompts
paginated: false
+ endpoint: get /v1/prompts
retrieve: get /v1/prompts/{prompt_id}
update: post /v1/prompts/{prompt_id}
delete: delete /v1/prompts/{prompt_id}
@@ -184,9 +229,8 @@ resources:
versions:
methods:
list:
- endpoint: get /v1/prompts/{prompt_id}/versions
paginated: false
-
+ endpoint: get /v1/prompts/{prompt_id}/versions
conversations:
models:
conversation_object: Conversation
@@ -216,7 +260,6 @@ resources:
delete:
type: http
endpoint: delete /v1/conversations/{conversation_id}/items/{item_id}
-
inspect:
models:
healthInfo: HealthInfo
@@ -226,13 +269,11 @@ resources:
methods:
health: get /v1/health
version: get /v1/version
-
embeddings:
models:
create_embeddings_response: OpenAIEmbeddingsResponse
methods:
create: post /v1/embeddings
-
chat:
models:
chat_completion_chunk: OpenAIChatCompletionChunk
@@ -241,14 +282,14 @@ resources:
methods:
create:
type: http
- endpoint: post /v1/chat/completions
streaming:
stream_event_model: chat.chat_completion_chunk
param_discriminator: stream
+ endpoint: post /v1/chat/completions
list:
type: http
- endpoint: get /v1/chat/completions
paginated: false
+ endpoint: get /v1/chat/completions
retrieve:
type: http
endpoint: get /v1/chat/completions/{completion_id}
@@ -256,17 +297,15 @@ resources:
methods:
create:
type: http
- endpoint: post /v1/completions
streaming:
param_discriminator: stream
-
+ endpoint: post /v1/completions
vector_io:
models:
queryChunksResponse: QueryChunksResponse
methods:
insert: post /v1/vector-io/insert
query: post /v1/vector-io/query
-
vector_stores:
models:
vector_store: VectorStoreObject
@@ -275,8 +314,7 @@ resources:
vector_store_search_response: VectorStoreSearchResponsePage
methods:
create: post /v1/vector_stores
- list:
- endpoint: get /v1/vector_stores
+ list: get /v1/vector_stores
retrieve: get /v1/vector_stores/{vector_store_id}
update: post /v1/vector_stores/{vector_store_id}
delete: delete /v1/vector_stores/{vector_store_id}
@@ -301,15 +339,14 @@ resources:
retrieve: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}
list_files: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files
cancel: post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel
-
models:
models:
model: OpenAIModel
list_models_response: OpenAIListModelsResponse
methods:
list:
- endpoint: get /v1/models
paginated: false
+ endpoint: get /v1/models
retrieve: get /v1/models/{model_id}
register: post /v1/models
unregister: delete /v1/models/{model_id}
@@ -317,38 +354,33 @@ resources:
openai:
methods:
list:
- endpoint: get /v1/models
paginated: false
-
+ endpoint: get /v1/models
providers:
models:
list_providers_response: ListProvidersResponse
methods:
list:
- endpoint: get /v1/providers
paginated: false
+ endpoint: get /v1/providers
retrieve: get /v1/providers/{provider_id}
-
routes:
models:
list_routes_response: ListRoutesResponse
methods:
list:
- endpoint: get /v1/inspect/routes
paginated: false
-
+ endpoint: get /v1/inspect/routes
moderations:
models:
create_response: ModerationObject
methods:
create: post /v1/moderations
-
safety:
models:
run_shield_response: RunShieldResponse
methods:
run_shield: post /v1/safety/run-shield
-
shields:
models:
shield: Shield
@@ -356,53 +388,48 @@ resources:
methods:
retrieve: get /v1/shields/{identifier}
list:
- endpoint: get /v1/shields
paginated: false
+ endpoint: get /v1/shields
register: post /v1/shields
delete: delete /v1/shields/{identifier}
-
scoring:
methods:
score: post /v1/scoring/score
score_batch: post /v1/scoring/score-batch
scoring_functions:
- methods:
- retrieve: get /v1/scoring-functions/{scoring_fn_id}
- list:
- endpoint: get /v1/scoring-functions
- paginated: false
- register: post /v1/scoring-functions
- unregister: delete /v1/scoring-functions/{scoring_fn_id}
models:
scoring_fn: ScoringFn
scoring_fn_params: ScoringFnParams
list_scoring_functions_response: ListScoringFunctionsResponse
-
+ methods:
+ retrieve: get /v1/scoring-functions/{scoring_fn_id}
+ list:
+ paginated: false
+ endpoint: get /v1/scoring-functions
+ register: post /v1/scoring-functions
+ unregister: delete /v1/scoring-functions/{scoring_fn_id}
files:
+ models:
+ file: OpenAIFileObject
+ list_files_response: ListOpenAIFileResponse
+ delete_file_response: OpenAIFileDeleteResponse
methods:
create: post /v1/files
list: get /v1/files
retrieve: get /v1/files/{file_id}
delete: delete /v1/files/{file_id}
content: get /v1/files/{file_id}/content
- models:
- file: OpenAIFileObject
- list_files_response: ListOpenAIFileResponse
- delete_file_response: OpenAIFileDeleteResponse
-
batches:
methods:
create: post /v1/batches
list: get /v1/batches
retrieve: get /v1/batches/{batch_id}
cancel: post /v1/batches/{batch_id}/cancel
-
alpha:
subresources:
inference:
methods:
rerank: post /v1alpha/inference/rerank
-
post_training:
models:
algorithm_config: AlgorithmConfig
@@ -418,39 +445,35 @@ resources:
cancel: post /v1alpha/post-training/job/cancel
status: get /v1alpha/post-training/job/status
list:
- endpoint: get /v1alpha/post-training/jobs
paginated: false
-
+ endpoint: get /v1alpha/post-training/jobs
benchmarks:
- methods:
- retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
- list:
- endpoint: get /v1alpha/eval/benchmarks
- paginated: false
- register: post /v1alpha/eval/benchmarks
- unregister: delete /v1alpha/eval/benchmarks/{benchmark_id}
models:
benchmark: Benchmark
list_benchmarks_response: ListBenchmarksResponse
-
+ methods:
+ retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
+ list:
+ paginated: false
+ endpoint: get /v1alpha/eval/benchmarks
+ register: post /v1alpha/eval/benchmarks
+ unregister: delete /v1alpha/eval/benchmarks/{benchmark_id}
eval:
+ models:
+ evaluate_response: EvaluateResponse
+ benchmark_config: BenchmarkConfig
+ job: Job
methods:
evaluate_rows: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
run_eval: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
evaluate_rows_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
run_eval_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
-
subresources:
jobs:
methods:
cancel: delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
status: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result
- models:
- evaluate_response: EvaluateResponse
- benchmark_config: BenchmarkConfig
- job: Job
-
beta:
subresources:
datasets:
@@ -460,74 +483,8 @@ resources:
register: post /v1beta/datasets
retrieve: get /v1beta/datasets/{dataset_id}
list:
- endpoint: get /v1beta/datasets
paginated: false
+ endpoint: get /v1beta/datasets
unregister: delete /v1beta/datasets/{dataset_id}
iterrows: get /v1beta/datasetio/iterrows/{dataset_id}
appendrows: post /v1beta/datasetio/append-rows/{dataset_id}
-
-settings:
- license: MIT
- unwrap_response_fields: [data]
- file_header: |
- Copyright (c) Meta Platforms, Inc. and affiliates.
- All rights reserved.
-
- This source code is licensed under the terms described in the LICENSE file in
- the root directory of this source tree.
-
-openapi:
- transformations:
- - command: mergeObject
- reason: Better return_type using enum
- args:
- target:
- - "$.components.schemas"
- object:
- ReturnType:
- additionalProperties: false
- properties:
- type:
- enum:
- - string
- - number
- - boolean
- - array
- - object
- - json
- - union
- - chat_completion_input
- - completion_input
- - agent_turn_input
- required:
- - type
- type: object
- - command: replaceProperties
- reason: Replace return type properties with better model (see above)
- args:
- filter:
- only:
- - "$.components.schemas.ScoringFn.properties.return_type"
- - "$.components.schemas.RegisterScoringFunctionRequest.properties.return_type"
- value:
- $ref: "#/components/schemas/ReturnType"
- - command: oneOfToAnyOf
- reason: Prism (mock server) doesn't like one of our requests as it technically matches multiple variants
-
-# `readme` is used to configure the code snippets that will be rendered in the
-# README.md of various SDKs. In particular, you can change the `headline`
-# snippet's endpoint and the arguments to call it with.
-readme:
- example_requests:
- default:
- type: request
- endpoint: post /v1/chat/completions
- params: &ref_0 {}
- headline:
- type: request
- endpoint: get /v1/models
- params: *ref_0
- pagination:
- type: request
- endpoint: post /v1/chat/completions
- params: {}
diff --git a/client-sdks/stainless/openapi.yml b/client-sdks/stainless/openapi.yml
index 7e81dbd60..e658a6237 100644
--- a/client-sdks/stainless/openapi.yml
+++ b/client-sdks/stainless/openapi.yml
@@ -1820,7 +1820,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/RegisterScoringFunctionRequestLoose'
+ $ref: '#/components/schemas/RegisterScoringFunctionRequest'
required: true
deprecated: true
/v1/scoring-functions/{scoring_fn_id}:
@@ -3310,7 +3310,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/RegisterDatasetRequestLoose'
+ $ref: '#/components/schemas/RegisterDatasetRequest'
required: true
deprecated: true
/v1beta/datasets/{dataset_id}:
@@ -3567,7 +3567,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/BenchmarkConfig'
+ $ref: '#/components/schemas/RunEvalRequest'
required: true
/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}:
get:
@@ -6739,9 +6739,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -7141,6 +7142,11 @@ components:
anyOf:
- type: string
- type: 'null'
+ parallel_tool_calls:
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -7267,9 +7273,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -9871,9 +9878,21 @@ components:
title: Object
default: vector_store.file
attributes:
- additionalProperties: true
+ additionalProperties:
+ anyOf:
+ - type: string
+ maxLength: 512
+ - type: number
+ - type: boolean
+ title: string | number | boolean
+ propertyNames:
+ type: string
+ maxLength: 64
type: object
+ maxProperties: 16
title: Attributes
+ description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers.
+ x-oaiTypeLabel: map
chunking_strategy:
oneOf:
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
@@ -10602,6 +10621,14 @@ components:
- scores
title: EvaluateResponse
description: The response from an evaluation.
+ RunEvalRequest:
+ properties:
+ benchmark_config:
+ $ref: '#/components/schemas/BenchmarkConfig'
+ type: object
+ required:
+ - benchmark_config
+ title: RunEvalRequest
Job:
properties:
job_id:
@@ -11185,6 +11212,67 @@ components:
- $ref: '#/components/schemas/CompletionInputType'
title: CompletionInputType
title: StringType | ... (9 variants)
+ RegisterScoringFunctionRequest:
+ properties:
+ scoring_fn_id:
+ type: string
+ title: Scoring Fn Id
+ description:
+ type: string
+ title: Description
+ return_type:
+ anyOf:
+ - $ref: '#/components/schemas/StringType'
+ title: StringType
+ - $ref: '#/components/schemas/NumberType'
+ title: NumberType
+ - $ref: '#/components/schemas/BooleanType'
+ title: BooleanType
+ - $ref: '#/components/schemas/ArrayType'
+ title: ArrayType
+ - $ref: '#/components/schemas/ObjectType'
+ title: ObjectType
+ - $ref: '#/components/schemas/JsonType'
+ title: JsonType
+ - $ref: '#/components/schemas/UnionType'
+ title: UnionType
+ - $ref: '#/components/schemas/ChatCompletionInputType'
+ title: ChatCompletionInputType
+ - $ref: '#/components/schemas/CompletionInputType'
+ title: CompletionInputType
+ title: StringType | ... (9 variants)
+ provider_scoring_fn_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ provider_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ params:
+ anyOf:
+ - oneOf:
+ - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams'
+ title: LLMAsJudgeScoringFnParams
+ - $ref: '#/components/schemas/RegexParserScoringFnParams'
+ title: RegexParserScoringFnParams
+ - $ref: '#/components/schemas/BasicScoringFnParams'
+ title: BasicScoringFnParams
+ discriminator:
+ propertyName: type
+ mapping:
+ basic: '#/components/schemas/BasicScoringFnParams'
+ llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams'
+ regex_parser: '#/components/schemas/RegexParserScoringFnParams'
+ title: LLMAsJudgeScoringFnParams | RegexParserScoringFnParams | BasicScoringFnParams
+ - type: 'null'
+ title: Params
+ type: object
+ required:
+ - scoring_fn_id
+ - description
+ - return_type
+ title: RegisterScoringFunctionRequest
RegisterShieldRequest:
properties:
shield_id:
@@ -11243,6 +11331,31 @@ components:
- $ref: '#/components/schemas/RowsDataSource'
title: RowsDataSource
title: URIDataSource | RowsDataSource
+ RegisterDatasetRequest:
+ properties:
+ purpose:
+ $ref: '#/components/schemas/DatasetPurpose'
+ source:
+ anyOf:
+ - $ref: '#/components/schemas/URIDataSource'
+ title: URIDataSource
+ - $ref: '#/components/schemas/RowsDataSource'
+ title: RowsDataSource
+ title: URIDataSource | RowsDataSource
+ metadata:
+ anyOf:
+ - additionalProperties: true
+ type: object
+ - type: 'null'
+ dataset_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ type: object
+ required:
+ - purpose
+ - source
+ title: RegisterDatasetRequest
RegisterBenchmarkRequest:
properties:
benchmark_id:
@@ -11979,41 +12092,6 @@ components:
required:
- reasoning_tokens
title: OutputTokensDetails
- RegisterDatasetRequestLoose:
- properties:
- purpose:
- title: Purpose
- source:
- title: Source
- metadata:
- title: Metadata
- dataset_id:
- title: Dataset Id
- type: object
- required:
- - purpose
- - source
- title: RegisterDatasetRequestLoose
- RegisterScoringFunctionRequestLoose:
- properties:
- scoring_fn_id:
- title: Scoring Fn Id
- description:
- title: Description
- return_type:
- title: Return Type
- provider_scoring_fn_id:
- title: Provider Scoring Fn Id
- provider_id:
- title: Provider Id
- params:
- title: Params
- type: object
- required:
- - scoring_fn_id
- - description
- - return_type
- title: RegisterScoringFunctionRequestLoose
SearchRankingOptions:
properties:
ranker:
diff --git a/docs/docs/building_applications/tools.mdx b/docs/docs/building_applications/tools.mdx
index 3b78ec57b..f7b913fef 100644
--- a/docs/docs/building_applications/tools.mdx
+++ b/docs/docs/building_applications/tools.mdx
@@ -104,23 +104,19 @@ client.toolgroups.register(
)
```
-Note that most of the more useful MCP servers need you to authenticate with them. Many of them use OAuth2.0 for authentication. You can provide authorization headers to send to the MCP server using the "Provider Data" abstraction provided by Llama Stack. When making an agent call,
+Note that most of the more useful MCP servers need you to authenticate with them. Many of them use OAuth2.0 for authentication. You can provide the authorization token when creating the Agent:
```python
agent = Agent(
...,
- tools=["mcp::deepwiki"],
- extra_headers={
- "X-LlamaStack-Provider-Data": json.dumps(
- {
- "mcp_headers": {
- "http://mcp.deepwiki.com/sse": {
- "Authorization": "Bearer ",
- },
- },
- }
- ),
- },
+ tools=[
+ {
+ "type": "mcp",
+ "server_url": "https://mcp.deepwiki.com/sse",
+ "server_label": "mcp::deepwiki",
+ "authorization": "", # OAuth token (without "Bearer " prefix)
+ }
+ ],
)
agent.create_turn(...)
```
diff --git a/docs/docs/providers/agents/index.mdx b/docs/docs/providers/agents/index.mdx
index 06eb104af..200a3b9ca 100644
--- a/docs/docs/providers/agents/index.mdx
+++ b/docs/docs/providers/agents/index.mdx
@@ -1,7 +1,8 @@
---
-description: "Agents
+description: |
+ Agents
- APIs for creating and interacting with agentic systems."
+ APIs for creating and interacting with agentic systems.
sidebar_label: Agents
title: Agents
---
diff --git a/docs/docs/providers/agents/inline_meta-reference.mdx b/docs/docs/providers/agents/inline_meta-reference.mdx
index fac9b8406..99a67feb4 100644
--- a/docs/docs/providers/agents/inline_meta-reference.mdx
+++ b/docs/docs/providers/agents/inline_meta-reference.mdx
@@ -14,7 +14,7 @@ Meta's reference implementation of an agent system that can use tools, access ve
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `persistence` | `` | No | | |
+| `persistence` | `AgentPersistenceConfig` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/batches/index.mdx b/docs/docs/providers/batches/index.mdx
index 2c64b277f..18fd49945 100644
--- a/docs/docs/providers/batches/index.mdx
+++ b/docs/docs/providers/batches/index.mdx
@@ -1,14 +1,15 @@
---
-description: "The Batches API enables efficient processing of multiple requests in a single operation,
- particularly useful for processing large datasets, batch evaluation workflows, and
- cost-effective inference at scale.
+description: |
+ The Batches API enables efficient processing of multiple requests in a single operation,
+ particularly useful for processing large datasets, batch evaluation workflows, and
+ cost-effective inference at scale.
- The API is designed to allow use of openai client libraries for seamless integration.
+ The API is designed to allow use of openai client libraries for seamless integration.
- This API provides the following extensions:
- - idempotent batch creation
+ This API provides the following extensions:
+ - idempotent batch creation
- Note: This API is currently under active development and may undergo changes."
+ Note: This API is currently under active development and may undergo changes.
sidebar_label: Batches
title: Batches
---
diff --git a/docs/docs/providers/batches/inline_reference.mdx b/docs/docs/providers/batches/inline_reference.mdx
index 45304fbb1..0a062c245 100644
--- a/docs/docs/providers/batches/inline_reference.mdx
+++ b/docs/docs/providers/batches/inline_reference.mdx
@@ -14,9 +14,9 @@ Reference implementation of batches API with KVStore persistence.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `kvstore` | `` | No | | Configuration for the key-value store backend. |
-| `max_concurrent_batches` | `` | No | 1 | Maximum number of concurrent batches to process simultaneously. |
-| `max_concurrent_requests_per_batch` | `` | No | 10 | Maximum number of concurrent requests to process per batch. |
+| `kvstore` | `KVStoreReference` | No | | Configuration for the key-value store backend. |
+| `max_concurrent_batches` | `int` | No | 1 | Maximum number of concurrent batches to process simultaneously. |
+| `max_concurrent_requests_per_batch` | `int` | No | 10 | Maximum number of concurrent requests to process per batch. |
## Sample Configuration
diff --git a/docs/docs/providers/datasetio/inline_localfs.mdx b/docs/docs/providers/datasetio/inline_localfs.mdx
index a9363376c..4314696c5 100644
--- a/docs/docs/providers/datasetio/inline_localfs.mdx
+++ b/docs/docs/providers/datasetio/inline_localfs.mdx
@@ -14,7 +14,7 @@ Local filesystem-based dataset I/O provider for reading and writing datasets to
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `kvstore` | `` | No | | |
+| `kvstore` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/datasetio/remote_huggingface.mdx b/docs/docs/providers/datasetio/remote_huggingface.mdx
index de3ffaaa6..ede8ed631 100644
--- a/docs/docs/providers/datasetio/remote_huggingface.mdx
+++ b/docs/docs/providers/datasetio/remote_huggingface.mdx
@@ -14,7 +14,7 @@ HuggingFace datasets provider for accessing and managing datasets from the Huggi
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `kvstore` | `` | No | | |
+| `kvstore` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/datasetio/remote_nvidia.mdx b/docs/docs/providers/datasetio/remote_nvidia.mdx
index 35a7dacee..97c48d810 100644
--- a/docs/docs/providers/datasetio/remote_nvidia.mdx
+++ b/docs/docs/providers/datasetio/remote_nvidia.mdx
@@ -17,7 +17,7 @@ NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform
| `api_key` | `str \| None` | No | | The NVIDIA API key. |
| `dataset_namespace` | `str \| None` | No | default | The NVIDIA dataset namespace. |
| `project_id` | `str \| None` | No | test-project | The NVIDIA project ID. |
-| `datasets_url` | `` | No | http://nemo.test | Base URL for the NeMo Dataset API |
+| `datasets_url` | `str` | No | http://nemo.test | Base URL for the NeMo Dataset API |
## Sample Configuration
diff --git a/docs/docs/providers/eval/index.mdx b/docs/docs/providers/eval/index.mdx
index 94bafe15e..3543db246 100644
--- a/docs/docs/providers/eval/index.mdx
+++ b/docs/docs/providers/eval/index.mdx
@@ -1,7 +1,8 @@
---
-description: "Evaluations
+description: |
+ Evaluations
- Llama Stack Evaluation API for running evaluations on model and agent candidates."
+ Llama Stack Evaluation API for running evaluations on model and agent candidates.
sidebar_label: Eval
title: Eval
---
diff --git a/docs/docs/providers/eval/inline_meta-reference.mdx b/docs/docs/providers/eval/inline_meta-reference.mdx
index 2c86c18c9..f1e923ee8 100644
--- a/docs/docs/providers/eval/inline_meta-reference.mdx
+++ b/docs/docs/providers/eval/inline_meta-reference.mdx
@@ -14,7 +14,7 @@ Meta's reference implementation of evaluation tasks with support for multiple la
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `kvstore` | `` | No | | |
+| `kvstore` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/eval/remote_nvidia.mdx b/docs/docs/providers/eval/remote_nvidia.mdx
index 36bb4726b..311496791 100644
--- a/docs/docs/providers/eval/remote_nvidia.mdx
+++ b/docs/docs/providers/eval/remote_nvidia.mdx
@@ -14,7 +14,7 @@ NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `evaluator_url` | `` | No | http://0.0.0.0:7331 | The url for accessing the evaluator service |
+| `evaluator_url` | `str` | No | http://0.0.0.0:7331 | The url for accessing the evaluator service |
## Sample Configuration
diff --git a/docs/docs/providers/files/index.mdx b/docs/docs/providers/files/index.mdx
index 19e338035..0b28e9aee 100644
--- a/docs/docs/providers/files/index.mdx
+++ b/docs/docs/providers/files/index.mdx
@@ -1,7 +1,8 @@
---
-description: "Files
+description: |
+ Files
- This API is used to upload documents that can be used with other Llama Stack APIs."
+ This API is used to upload documents that can be used with other Llama Stack APIs.
sidebar_label: Files
title: Files
---
diff --git a/docs/docs/providers/files/inline_localfs.mdx b/docs/docs/providers/files/inline_localfs.mdx
index bff0c4eb9..aa3a9232b 100644
--- a/docs/docs/providers/files/inline_localfs.mdx
+++ b/docs/docs/providers/files/inline_localfs.mdx
@@ -14,9 +14,9 @@ Local filesystem-based file storage provider for managing files and documents lo
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `storage_dir` | `` | No | | Directory to store uploaded files |
-| `metadata_store` | `` | No | | SQL store configuration for file metadata |
-| `ttl_secs` | `` | No | 31536000 | |
+| `storage_dir` | `str` | No | | Directory to store uploaded files |
+| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |
+| `ttl_secs` | `int` | No | 31536000 | |
## Sample Configuration
diff --git a/docs/docs/providers/files/remote_openai.mdx b/docs/docs/providers/files/remote_openai.mdx
index 3b5c40aad..48fe2fd57 100644
--- a/docs/docs/providers/files/remote_openai.mdx
+++ b/docs/docs/providers/files/remote_openai.mdx
@@ -14,8 +14,8 @@ OpenAI Files API provider for managing files through OpenAI's native file storag
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `api_key` | `` | No | | OpenAI API key for authentication |
-| `metadata_store` | `` | No | | SQL store configuration for file metadata |
+| `api_key` | `str` | No | | OpenAI API key for authentication |
+| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |
## Sample Configuration
diff --git a/docs/docs/providers/files/remote_s3.mdx b/docs/docs/providers/files/remote_s3.mdx
index 65cd545c5..857ba1819 100644
--- a/docs/docs/providers/files/remote_s3.mdx
+++ b/docs/docs/providers/files/remote_s3.mdx
@@ -14,13 +14,13 @@ AWS S3-based file storage provider for scalable cloud file management with metad
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `bucket_name` | `` | No | | S3 bucket name to store files |
-| `region` | `` | No | us-east-1 | AWS region where the bucket is located |
+| `bucket_name` | `str` | No | | S3 bucket name to store files |
+| `region` | `str` | No | us-east-1 | AWS region where the bucket is located |
| `aws_access_key_id` | `str \| None` | No | | AWS access key ID (optional if using IAM roles) |
| `aws_secret_access_key` | `str \| None` | No | | AWS secret access key (optional if using IAM roles) |
| `endpoint_url` | `str \| None` | No | | Custom S3 endpoint URL (for MinIO, LocalStack, etc.) |
-| `auto_create_bucket` | `` | No | False | Automatically create the S3 bucket if it doesn't exist |
-| `metadata_store` | `` | No | | SQL store configuration for file metadata |
+| `auto_create_bucket` | `bool` | No | False | Automatically create the S3 bucket if it doesn't exist |
+| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |
## Sample Configuration
diff --git a/docs/docs/providers/inference/index.mdx b/docs/docs/providers/inference/index.mdx
index 478611420..e2d94bfaf 100644
--- a/docs/docs/providers/inference/index.mdx
+++ b/docs/docs/providers/inference/index.mdx
@@ -1,12 +1,13 @@
---
-description: "Inference
+description: |
+ Inference
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ Llama Stack Inference API for generating completions, chat completions, and embeddings.
- This API provides the raw interface to the underlying models. Three kinds of models are supported:
- - LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.
- - Embedding models: these models generate embeddings to be used for semantic search.
- - Rerank models: these models reorder the documents based on their relevance to a query."
+ This API provides the raw interface to the underlying models. Three kinds of models are supported:
+ - LLM models: these models generate "raw" and "chat" (conversational) completions.
+ - Embedding models: these models generate embeddings to be used for semantic search.
+ - Rerank models: these models reorder the documents based on their relevance to a query.
sidebar_label: Inference
title: Inference
---
diff --git a/docs/docs/providers/inference/inline_meta-reference.mdx b/docs/docs/providers/inference/inline_meta-reference.mdx
index 328586f9a..55b1606b0 100644
--- a/docs/docs/providers/inference/inline_meta-reference.mdx
+++ b/docs/docs/providers/inference/inline_meta-reference.mdx
@@ -16,12 +16,12 @@ Meta's reference implementation of inference with support for various model form
|-------|------|----------|---------|-------------|
| `model` | `str \| None` | No | | |
| `torch_seed` | `int \| None` | No | | |
-| `max_seq_len` | `` | No | 4096 | |
-| `max_batch_size` | `` | No | 1 | |
+| `max_seq_len` | `int` | No | 4096 | |
+| `max_batch_size` | `int` | No | 1 | |
| `model_parallel_size` | `int \| None` | No | | |
-| `create_distributed_process_group` | `` | No | True | |
+| `create_distributed_process_group` | `bool` | No | True | |
| `checkpoint_dir` | `str \| None` | No | | |
-| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig, annotation=NoneType, required=True, discriminator='type'` | No | | |
+| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig \| None` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_anthropic.mdx b/docs/docs/providers/inference/remote_anthropic.mdx
index 4acbbac50..14b431894 100644
--- a/docs/docs/providers/inference/remote_anthropic.mdx
+++ b/docs/docs/providers/inference/remote_anthropic.mdx
@@ -14,9 +14,9 @@ Anthropic inference provider for accessing Claude models and Anthropic's AI serv
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_azure.mdx b/docs/docs/providers/inference/remote_azure.mdx
index b3041259e..0382b42d7 100644
--- a/docs/docs/providers/inference/remote_azure.mdx
+++ b/docs/docs/providers/inference/remote_azure.mdx
@@ -21,10 +21,10 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `api_base` | `` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com) |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com/openai/v1) |
| `api_version` | `str \| None` | No | | Azure API version for Azure (e.g., 2024-12-01-preview) |
| `api_type` | `str \| None` | No | azure | Azure API type for Azure (e.g., azure) |
@@ -32,7 +32,7 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview
```yaml
api_key: ${env.AZURE_API_KEY:=}
-api_base: ${env.AZURE_API_BASE:=}
+base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
```
diff --git a/docs/docs/providers/inference/remote_bedrock.mdx b/docs/docs/providers/inference/remote_bedrock.mdx
index 61931643e..0b36ea01a 100644
--- a/docs/docs/providers/inference/remote_bedrock.mdx
+++ b/docs/docs/providers/inference/remote_bedrock.mdx
@@ -14,14 +14,14 @@ AWS Bedrock inference provider using OpenAI compatible endpoint.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `region_name` | `` | No | us-east-2 | AWS Region for the Bedrock Runtime endpoint |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `region_name` | `str` | No | us-east-2 | AWS Region for the Bedrock Runtime endpoint |
## Sample Configuration
```yaml
-api_key: ${env.AWS_BEDROCK_API_KEY:=}
+api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
```
diff --git a/docs/docs/providers/inference/remote_cerebras.mdx b/docs/docs/providers/inference/remote_cerebras.mdx
index cda0be224..9fd390a29 100644
--- a/docs/docs/providers/inference/remote_cerebras.mdx
+++ b/docs/docs/providers/inference/remote_cerebras.mdx
@@ -14,14 +14,14 @@ Cerebras inference provider for running models on Cerebras Cloud platform.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `base_url` | `` | No | https://api.cerebras.ai | Base URL for the Cerebras API |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.cerebras.ai/v1 | Base URL for the Cerebras API |
## Sample Configuration
```yaml
-base_url: https://api.cerebras.ai
+base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
```
diff --git a/docs/docs/providers/inference/remote_databricks.mdx b/docs/docs/providers/inference/remote_databricks.mdx
index f14fd0175..d50c52958 100644
--- a/docs/docs/providers/inference/remote_databricks.mdx
+++ b/docs/docs/providers/inference/remote_databricks.mdx
@@ -14,14 +14,14 @@ Databricks inference provider for running models on Databricks' unified analytic
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | The Databricks API token |
-| `url` | `str \| None` | No | | The URL for the Databricks model serving endpoint |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_token` | `SecretStr \| None` | No | | The Databricks API token |
+| `base_url` | `HttpUrl \| None` | No | | The URL for the Databricks model serving endpoint (should include /serving-endpoints path) |
## Sample Configuration
```yaml
-url: ${env.DATABRICKS_HOST:=}
+base_url: ${env.DATABRICKS_HOST:=}
api_token: ${env.DATABRICKS_TOKEN:=}
```
diff --git a/docs/docs/providers/inference/remote_fireworks.mdx b/docs/docs/providers/inference/remote_fireworks.mdx
index 71f16ccec..a67403a9b 100644
--- a/docs/docs/providers/inference/remote_fireworks.mdx
+++ b/docs/docs/providers/inference/remote_fireworks.mdx
@@ -14,14 +14,14 @@ Fireworks AI inference provider for Llama models and other AI models on the Fire
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server |
## Sample Configuration
```yaml
-url: https://api.fireworks.ai/inference/v1
+base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
```
diff --git a/docs/docs/providers/inference/remote_gemini.mdx b/docs/docs/providers/inference/remote_gemini.mdx
index 22b3c8cb7..75e6b9692 100644
--- a/docs/docs/providers/inference/remote_gemini.mdx
+++ b/docs/docs/providers/inference/remote_gemini.mdx
@@ -14,9 +14,9 @@ Google Gemini inference provider for accessing Gemini models and Google's AI ser
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_groq.mdx b/docs/docs/providers/inference/remote_groq.mdx
index aaf1516ca..17acd3140 100644
--- a/docs/docs/providers/inference/remote_groq.mdx
+++ b/docs/docs/providers/inference/remote_groq.mdx
@@ -14,14 +14,14 @@ Groq inference provider for ultra-fast inference using Groq's LPU technology.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | https://api.groq.com | The URL for the Groq AI server |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.groq.com/openai/v1 | The URL for the Groq AI server |
## Sample Configuration
```yaml
-url: https://api.groq.com
+base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
```
diff --git a/docs/docs/providers/inference/remote_hf_endpoint.mdx b/docs/docs/providers/inference/remote_hf_endpoint.mdx
index 771b24f8d..52b40c1f2 100644
--- a/docs/docs/providers/inference/remote_hf_endpoint.mdx
+++ b/docs/docs/providers/inference/remote_hf_endpoint.mdx
@@ -14,8 +14,8 @@ HuggingFace Inference Endpoints provider for dedicated model serving.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `endpoint_name` | `` | No | | The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided. |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
+| `endpoint_name` | `str` | No | | The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided. |
+| `api_token` | `SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_hf_serverless.mdx b/docs/docs/providers/inference/remote_hf_serverless.mdx
index 1a89b8e3e..52280df82 100644
--- a/docs/docs/providers/inference/remote_hf_serverless.mdx
+++ b/docs/docs/providers/inference/remote_hf_serverless.mdx
@@ -14,8 +14,8 @@ HuggingFace Inference API serverless provider for on-demand model inference.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `huggingface_repo` | `` | No | | The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct') |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
+| `huggingface_repo` | `str` | No | | The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct') |
+| `api_token` | `SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_llama-openai-compat.mdx b/docs/docs/providers/inference/remote_llama-openai-compat.mdx
index 9769c0793..69e90b2ac 100644
--- a/docs/docs/providers/inference/remote_llama-openai-compat.mdx
+++ b/docs/docs/providers/inference/remote_llama-openai-compat.mdx
@@ -14,14 +14,14 @@ Llama OpenAI-compatible provider for using Llama models with OpenAI API format.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `openai_compat_api_base` | `` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server |
## Sample Configuration
```yaml
-openai_compat_api_base: https://api.llama.com/compat/v1/
+base_url: https://api.llama.com/compat/v1/
api_key: ${env.LLAMA_API_KEY}
```
diff --git a/docs/docs/providers/inference/remote_nvidia.mdx b/docs/docs/providers/inference/remote_nvidia.mdx
index 57c64ab46..a890bc57f 100644
--- a/docs/docs/providers/inference/remote_nvidia.mdx
+++ b/docs/docs/providers/inference/remote_nvidia.mdx
@@ -14,18 +14,16 @@ NVIDIA inference provider for accessing NVIDIA NIM models and AI services.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | https://integrate.api.nvidia.com | A base url for accessing the NVIDIA NIM |
-| `timeout` | `` | No | 60 | Timeout for the HTTP requests |
-| `append_api_version` | `` | No | True | When set to false, the API version will not be appended to the base_url. By default, it is true. |
-| `rerank_model_to_url` | `dict[str, str` | No | `{'nv-rerank-qa-mistral-4b:1': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking', 'nvidia/nv-rerankqa-mistral-4b-v3': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking', 'nvidia/llama-3.2-nv-rerankqa-1b-v2': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v2/reranking'}` | Mapping of rerank model identifiers to their API endpoints. |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://integrate.api.nvidia.com/v1 | A base url for accessing the NVIDIA NIM |
+| `timeout` | `int` | No | 60 | Timeout for the HTTP requests |
+| `rerank_model_to_url` | `dict[str, str]` | No | `{'nv-rerank-qa-mistral-4b:1': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking', 'nvidia/nv-rerankqa-mistral-4b-v3': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking', 'nvidia/llama-3.2-nv-rerankqa-1b-v2': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v2/reranking'}` | Mapping of rerank model identifiers to their API endpoints. |
## Sample Configuration
```yaml
-url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
-append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
```
diff --git a/docs/docs/providers/inference/remote_oci.mdx b/docs/docs/providers/inference/remote_oci.mdx
index 33a201a55..d448755bf 100644
--- a/docs/docs/providers/inference/remote_oci.mdx
+++ b/docs/docs/providers/inference/remote_oci.mdx
@@ -21,14 +21,14 @@ https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `oci_auth_type` | `` | No | instance_principal | OCI authentication type (must be one of: instance_principal, config_file) |
-| `oci_region` | `` | No | us-ashburn-1 | OCI region (e.g., us-ashburn-1) |
-| `oci_compartment_id` | `` | No | | OCI compartment ID for the Generative AI service |
-| `oci_config_file_path` | `` | No | ~/.oci/config | OCI config file path (required if oci_auth_type is config_file) |
-| `oci_config_profile` | `` | No | DEFAULT | OCI config profile (required if oci_auth_type is config_file) |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `oci_auth_type` | `str` | No | instance_principal | OCI authentication type (must be one of: instance_principal, config_file) |
+| `oci_region` | `str` | No | us-ashburn-1 | OCI region (e.g., us-ashburn-1) |
+| `oci_compartment_id` | `str` | No | | OCI compartment ID for the Generative AI service |
+| `oci_config_file_path` | `str` | No | ~/.oci/config | OCI config file path (required if oci_auth_type is config_file) |
+| `oci_config_profile` | `str` | No | DEFAULT | OCI config profile (required if oci_auth_type is config_file) |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_ollama.mdx b/docs/docs/providers/inference/remote_ollama.mdx
index e00e34e4a..f9be84add 100644
--- a/docs/docs/providers/inference/remote_ollama.mdx
+++ b/docs/docs/providers/inference/remote_ollama.mdx
@@ -14,12 +14,12 @@ Ollama inference provider for running local models through the Ollama runtime.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `url` | `` | No | http://localhost:11434 | |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `base_url` | `HttpUrl \| None` | No | http://localhost:11434/v1 | |
## Sample Configuration
```yaml
-url: ${env.OLLAMA_URL:=http://localhost:11434}
+base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
```
diff --git a/docs/docs/providers/inference/remote_openai.mdx b/docs/docs/providers/inference/remote_openai.mdx
index 28c8ab7bf..3ac3a21ad 100644
--- a/docs/docs/providers/inference/remote_openai.mdx
+++ b/docs/docs/providers/inference/remote_openai.mdx
@@ -14,10 +14,10 @@ OpenAI inference provider for accessing GPT models and other OpenAI services.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `base_url` | `` | No | https://api.openai.com/v1 | Base URL for OpenAI API |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.openai.com/v1 | Base URL for OpenAI API |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_passthrough.mdx b/docs/docs/providers/inference/remote_passthrough.mdx
index 957cd04da..325ecc352 100644
--- a/docs/docs/providers/inference/remote_passthrough.mdx
+++ b/docs/docs/providers/inference/remote_passthrough.mdx
@@ -14,14 +14,14 @@ Passthrough inference provider for connecting to any external inference service
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | | The URL for the passthrough endpoint |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | | The URL for the passthrough endpoint |
## Sample Configuration
```yaml
-url: ${env.PASSTHROUGH_URL}
+base_url: ${env.PASSTHROUGH_URL}
api_key: ${env.PASSTHROUGH_API_KEY}
```
diff --git a/docs/docs/providers/inference/remote_runpod.mdx b/docs/docs/providers/inference/remote_runpod.mdx
index 3cbbd0322..6cdcdd3b5 100644
--- a/docs/docs/providers/inference/remote_runpod.mdx
+++ b/docs/docs/providers/inference/remote_runpod.mdx
@@ -14,14 +14,14 @@ RunPod inference provider for running models on RunPod's cloud GPU platform.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | The API token |
-| `url` | `str \| None` | No | | The URL for the Runpod model serving endpoint |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_token` | `SecretStr \| None` | No | | The API token |
+| `base_url` | `HttpUrl \| None` | No | | The URL for the Runpod model serving endpoint |
## Sample Configuration
```yaml
-url: ${env.RUNPOD_URL:=}
+base_url: ${env.RUNPOD_URL:=}
api_token: ${env.RUNPOD_API_TOKEN}
```
diff --git a/docs/docs/providers/inference/remote_sambanova.mdx b/docs/docs/providers/inference/remote_sambanova.mdx
index 0ac4600b7..bbefdb0f0 100644
--- a/docs/docs/providers/inference/remote_sambanova.mdx
+++ b/docs/docs/providers/inference/remote_sambanova.mdx
@@ -14,14 +14,14 @@ SambaNova inference provider for running models on SambaNova's dataflow architec
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server |
## Sample Configuration
```yaml
-url: https://api.sambanova.ai/v1
+base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
```
diff --git a/docs/docs/providers/inference/remote_tgi.mdx b/docs/docs/providers/inference/remote_tgi.mdx
index 67fe6d237..3790acdd4 100644
--- a/docs/docs/providers/inference/remote_tgi.mdx
+++ b/docs/docs/providers/inference/remote_tgi.mdx
@@ -14,12 +14,12 @@ Text Generation Inference (TGI) provider for HuggingFace model serving.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `url` | `` | No | | The URL for the TGI serving endpoint |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `base_url` | `HttpUrl \| None` | No | | The URL for the TGI serving endpoint (should include /v1 path) |
## Sample Configuration
```yaml
-url: ${env.TGI_URL:=}
+base_url: ${env.TGI_URL:=}
```
diff --git a/docs/docs/providers/inference/remote_together.mdx b/docs/docs/providers/inference/remote_together.mdx
index c8e3bcdcf..dc025b5ac 100644
--- a/docs/docs/providers/inference/remote_together.mdx
+++ b/docs/docs/providers/inference/remote_together.mdx
@@ -14,14 +14,14 @@ Together AI inference provider for open-source models and collaborative AI devel
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | https://api.together.xyz/v1 | The URL for the Together AI server |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://api.together.xyz/v1 | The URL for the Together AI server |
## Sample Configuration
```yaml
-url: https://api.together.xyz/v1
+base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
```
diff --git a/docs/docs/providers/inference/remote_vertexai.mdx b/docs/docs/providers/inference/remote_vertexai.mdx
index c182ed485..59b574561 100644
--- a/docs/docs/providers/inference/remote_vertexai.mdx
+++ b/docs/docs/providers/inference/remote_vertexai.mdx
@@ -53,10 +53,10 @@ Available Models:
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `project` | `` | No | | Google Cloud project ID for Vertex AI |
-| `location` | `` | No | us-central1 | Google Cloud location for Vertex AI |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `project` | `str` | No | | Google Cloud project ID for Vertex AI |
+| `location` | `str` | No | us-central1 | Google Cloud location for Vertex AI |
## Sample Configuration
diff --git a/docs/docs/providers/inference/remote_vllm.mdx b/docs/docs/providers/inference/remote_vllm.mdx
index f844bcee0..a52c24adb 100644
--- a/docs/docs/providers/inference/remote_vllm.mdx
+++ b/docs/docs/providers/inference/remote_vllm.mdx
@@ -14,17 +14,17 @@ Remote vLLM inference provider for connecting to vLLM servers.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | The API token |
-| `url` | `str \| None` | No | | The URL for the vLLM model serving endpoint |
-| `max_tokens` | `` | No | 4096 | Maximum number of tokens to generate. |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_token` | `SecretStr \| None` | No | | The API token |
+| `base_url` | `HttpUrl \| None` | No | | The URL for the vLLM model serving endpoint |
+| `max_tokens` | `int` | No | 4096 | Maximum number of tokens to generate. |
| `tls_verify` | `bool \| str` | No | True | Whether to verify TLS certificates. Can be a boolean or a path to a CA certificate file. |
## Sample Configuration
```yaml
-url: ${env.VLLM_URL:=}
+base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
diff --git a/docs/docs/providers/inference/remote_watsonx.mdx b/docs/docs/providers/inference/remote_watsonx.mdx
index 2227aa1cc..47d543e3a 100644
--- a/docs/docs/providers/inference/remote_watsonx.mdx
+++ b/docs/docs/providers/inference/remote_watsonx.mdx
@@ -14,17 +14,17 @@ IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `base_url` | `HttpUrl \| None` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai |
| `project_id` | `str \| None` | No | | The watsonx.ai project ID |
-| `timeout` | `` | No | 60 | Timeout for the HTTP requests |
+| `timeout` | `int` | No | 60 | Timeout for the HTTP requests |
## Sample Configuration
```yaml
-url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}
+base_url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}
api_key: ${env.WATSONX_API_KEY:=}
project_id: ${env.WATSONX_PROJECT_ID:=}
```
diff --git a/docs/docs/providers/post_training/inline_huggingface-gpu.mdx b/docs/docs/providers/post_training/inline_huggingface-gpu.mdx
index ac7644de7..0d4241b27 100644
--- a/docs/docs/providers/post_training/inline_huggingface-gpu.mdx
+++ b/docs/docs/providers/post_training/inline_huggingface-gpu.mdx
@@ -14,23 +14,23 @@ HuggingFace-based post-training provider for fine-tuning models using the Huggin
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `device` | `` | No | cuda | |
-| `distributed_backend` | `Literal['fsdp', 'deepspeed'` | No | | |
-| `checkpoint_format` | `Literal['full_state', 'huggingface'` | No | huggingface | |
-| `chat_template` | `` | No | `<|user|>`
`{input}`
`<|assistant|>`
`{output}` | |
-| `model_specific_config` | `` | No | `{'trust_remote_code': True, 'attn_implementation': 'sdpa'}` | |
-| `max_seq_length` | `` | No | 2048 | |
-| `gradient_checkpointing` | `` | No | False | |
-| `save_total_limit` | `` | No | 3 | |
-| `logging_steps` | `` | No | 10 | |
-| `warmup_ratio` | `` | No | 0.1 | |
-| `weight_decay` | `` | No | 0.01 | |
-| `dataloader_num_workers` | `` | No | 4 | |
-| `dataloader_pin_memory` | `` | No | True | |
-| `dpo_beta` | `` | No | 0.1 | |
-| `use_reference_model` | `` | No | True | |
-| `dpo_loss_type` | `Literal['sigmoid', 'hinge', 'ipo', 'kto_pair'` | No | sigmoid | |
-| `dpo_output_dir` | `` | No | | |
+| `device` | `str` | No | cuda | |
+| `distributed_backend` | `Literal[fsdp, deepspeed] \| None` | No | | |
+| `checkpoint_format` | `Literal[full_state, huggingface] \| None` | No | huggingface | |
+| `chat_template` | `str` | No | `<|user|>`
`{input}`
`<|assistant|>`
`{output}` | |
+| `model_specific_config` | `dict` | No | `{'trust_remote_code': True, 'attn_implementation': 'sdpa'}` | |
+| `max_seq_length` | `int` | No | 2048 | |
+| `gradient_checkpointing` | `bool` | No | False | |
+| `save_total_limit` | `int` | No | 3 | |
+| `logging_steps` | `int` | No | 10 | |
+| `warmup_ratio` | `float` | No | 0.1 | |
+| `weight_decay` | `float` | No | 0.01 | |
+| `dataloader_num_workers` | `int` | No | 4 | |
+| `dataloader_pin_memory` | `bool` | No | True | |
+| `dpo_beta` | `float` | No | 0.1 | |
+| `use_reference_model` | `bool` | No | True | |
+| `dpo_loss_type` | `Literal[sigmoid, hinge, ipo, kto_pair]` | No | sigmoid | |
+| `dpo_output_dir` | `str` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/post_training/inline_torchtune-cpu.mdx b/docs/docs/providers/post_training/inline_torchtune-cpu.mdx
index f789392fc..3e2c15d3e 100644
--- a/docs/docs/providers/post_training/inline_torchtune-cpu.mdx
+++ b/docs/docs/providers/post_training/inline_torchtune-cpu.mdx
@@ -15,7 +15,7 @@ TorchTune-based post-training provider for fine-tuning and optimizing models usi
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `torch_seed` | `int \| None` | No | | |
-| `checkpoint_format` | `Literal['meta', 'huggingface'` | No | meta | |
+| `checkpoint_format` | `Literal[meta, huggingface] \| None` | No | meta | |
## Sample Configuration
diff --git a/docs/docs/providers/post_training/inline_torchtune-gpu.mdx b/docs/docs/providers/post_training/inline_torchtune-gpu.mdx
index bd87797af..ac222d8a5 100644
--- a/docs/docs/providers/post_training/inline_torchtune-gpu.mdx
+++ b/docs/docs/providers/post_training/inline_torchtune-gpu.mdx
@@ -15,7 +15,7 @@ TorchTune-based post-training provider for fine-tuning and optimizing models usi
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `torch_seed` | `int \| None` | No | | |
-| `checkpoint_format` | `Literal['meta', 'huggingface'` | No | meta | |
+| `checkpoint_format` | `Literal[meta, huggingface] \| None` | No | meta | |
## Sample Configuration
diff --git a/docs/docs/providers/post_training/remote_nvidia.mdx b/docs/docs/providers/post_training/remote_nvidia.mdx
index 448ac4c75..d0208f82f 100644
--- a/docs/docs/providers/post_training/remote_nvidia.mdx
+++ b/docs/docs/providers/post_training/remote_nvidia.mdx
@@ -18,9 +18,9 @@ NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.
| `dataset_namespace` | `str \| None` | No | default | The NVIDIA dataset namespace. |
| `project_id` | `str \| None` | No | test-example-model@v1 | The NVIDIA project ID. |
| `customizer_url` | `str \| None` | No | | Base URL for the NeMo Customizer API |
-| `timeout` | `` | No | 300 | Timeout for the NVIDIA Post Training API |
-| `max_retries` | `` | No | 3 | Maximum number of retries for the NVIDIA Post Training API |
-| `output_model_dir` | `` | No | test-example-model@v1 | Directory to save the output model |
+| `timeout` | `int` | No | 300 | Timeout for the NVIDIA Post Training API |
+| `max_retries` | `int` | No | 3 | Maximum number of retries for the NVIDIA Post Training API |
+| `output_model_dir` | `str` | No | test-example-model@v1 | Directory to save the output model |
## Sample Configuration
diff --git a/docs/docs/providers/safety/index.mdx b/docs/docs/providers/safety/index.mdx
index 4e2de4f33..0c13de28c 100644
--- a/docs/docs/providers/safety/index.mdx
+++ b/docs/docs/providers/safety/index.mdx
@@ -1,7 +1,8 @@
---
-description: "Safety
+description: |
+ Safety
- OpenAI-compatible Moderations API."
+ OpenAI-compatible Moderations API.
sidebar_label: Safety
title: Safety
---
diff --git a/docs/docs/providers/safety/inline_llama-guard.mdx b/docs/docs/providers/safety/inline_llama-guard.mdx
index 65866c9b2..d52e7289a 100644
--- a/docs/docs/providers/safety/inline_llama-guard.mdx
+++ b/docs/docs/providers/safety/inline_llama-guard.mdx
@@ -14,7 +14,7 @@ Llama Guard safety provider for content moderation and safety filtering using Me
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `excluded_categories` | `list[str` | No | [] | |
+| `excluded_categories` | `list[str]` | No | [] | |
## Sample Configuration
diff --git a/docs/docs/providers/safety/inline_prompt-guard.mdx b/docs/docs/providers/safety/inline_prompt-guard.mdx
index c52e03e4b..dc57f8555 100644
--- a/docs/docs/providers/safety/inline_prompt-guard.mdx
+++ b/docs/docs/providers/safety/inline_prompt-guard.mdx
@@ -14,7 +14,7 @@ Prompt Guard safety provider for detecting and filtering unsafe prompts and cont
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `guard_type` | `` | No | injection | |
+| `guard_type` | `str` | No | injection | |
## Sample Configuration
diff --git a/docs/docs/providers/safety/remote_bedrock.mdx b/docs/docs/providers/safety/remote_bedrock.mdx
index 663a761f0..990bd7246 100644
--- a/docs/docs/providers/safety/remote_bedrock.mdx
+++ b/docs/docs/providers/safety/remote_bedrock.mdx
@@ -14,8 +14,8 @@ AWS Bedrock safety provider for content moderation using AWS's safety services.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `` | No | False | Whether to refresh models periodically from the provider |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `aws_access_key_id` | `str \| None` | No | | The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID |
| `aws_secret_access_key` | `str \| None` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY |
| `aws_session_token` | `str \| None` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN |
diff --git a/docs/docs/providers/safety/remote_nvidia.mdx b/docs/docs/providers/safety/remote_nvidia.mdx
index 0f665e60a..ac1fd0b03 100644
--- a/docs/docs/providers/safety/remote_nvidia.mdx
+++ b/docs/docs/providers/safety/remote_nvidia.mdx
@@ -14,7 +14,7 @@ NVIDIA's safety provider for content moderation and safety filtering.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `guardrails_service_url` | `` | No | http://0.0.0.0:7331 | The url for accessing the Guardrails service |
+| `guardrails_service_url` | `str` | No | http://0.0.0.0:7331 | The url for accessing the Guardrails service |
| `config_id` | `str \| None` | No | self-check | Guardrails configuration ID to use from the Guardrails configuration store |
## Sample Configuration
diff --git a/docs/docs/providers/safety/remote_sambanova.mdx b/docs/docs/providers/safety/remote_sambanova.mdx
index da70fce6c..69712879c 100644
--- a/docs/docs/providers/safety/remote_sambanova.mdx
+++ b/docs/docs/providers/safety/remote_sambanova.mdx
@@ -14,8 +14,8 @@ SambaNova's safety provider for content moderation and safety filtering.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `url` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | The SambaNova cloud API Key |
+| `url` | `str` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server |
+| `api_key` | `SecretStr \| None` | No | | The SambaNova cloud API Key |
## Sample Configuration
diff --git a/docs/docs/providers/tool_runtime/remote_bing-search.mdx b/docs/docs/providers/tool_runtime/remote_bing-search.mdx
index ec06bc20f..f97087d9e 100644
--- a/docs/docs/providers/tool_runtime/remote_bing-search.mdx
+++ b/docs/docs/providers/tool_runtime/remote_bing-search.mdx
@@ -15,7 +15,7 @@ Bing Search tool for web search capabilities using Microsoft's search engine.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `api_key` | `str \| None` | No | | |
-| `top_k` | `` | No | 3 | |
+| `top_k` | `int` | No | 3 | |
## Sample Configuration
diff --git a/docs/docs/providers/tool_runtime/remote_brave-search.mdx b/docs/docs/providers/tool_runtime/remote_brave-search.mdx
index 3aeed67d5..987ce0e41 100644
--- a/docs/docs/providers/tool_runtime/remote_brave-search.mdx
+++ b/docs/docs/providers/tool_runtime/remote_brave-search.mdx
@@ -15,7 +15,7 @@ Brave Search tool for web search capabilities with privacy-focused results.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `api_key` | `str \| None` | No | | The Brave Search API Key |
-| `max_results` | `` | No | 3 | The maximum number of results to return |
+| `max_results` | `int` | No | 3 | The maximum number of results to return |
## Sample Configuration
diff --git a/docs/docs/providers/tool_runtime/remote_tavily-search.mdx b/docs/docs/providers/tool_runtime/remote_tavily-search.mdx
index fdca31bbe..36ad63646 100644
--- a/docs/docs/providers/tool_runtime/remote_tavily-search.mdx
+++ b/docs/docs/providers/tool_runtime/remote_tavily-search.mdx
@@ -15,7 +15,7 @@ Tavily Search tool for AI-optimized web search with structured results.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `api_key` | `str \| None` | No | | The Tavily Search API Key |
-| `max_results` | `` | No | 3 | The maximum number of results to return |
+| `max_results` | `int` | No | 3 | The maximum number of results to return |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_chromadb.mdx b/docs/docs/providers/vector_io/inline_chromadb.mdx
index 0be5cd5b3..d78a67b01 100644
--- a/docs/docs/providers/vector_io/inline_chromadb.mdx
+++ b/docs/docs/providers/vector_io/inline_chromadb.mdx
@@ -78,8 +78,8 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `db_path` | `` | No | | |
-| `persistence` | `` | No | | Config for KV store backend |
+| `db_path` | `str` | No | | |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_faiss.mdx b/docs/docs/providers/vector_io/inline_faiss.mdx
index 3a1fba055..c1eedf9db 100644
--- a/docs/docs/providers/vector_io/inline_faiss.mdx
+++ b/docs/docs/providers/vector_io/inline_faiss.mdx
@@ -95,7 +95,7 @@ more details about Faiss in general.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `persistence` | `` | No | | |
+| `persistence` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_meta-reference.mdx b/docs/docs/providers/vector_io/inline_meta-reference.mdx
index 17fd40cf5..9266b65b5 100644
--- a/docs/docs/providers/vector_io/inline_meta-reference.mdx
+++ b/docs/docs/providers/vector_io/inline_meta-reference.mdx
@@ -14,7 +14,7 @@ Meta's reference implementation of a vector database.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `persistence` | `` | No | | |
+| `persistence` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_milvus.mdx b/docs/docs/providers/vector_io/inline_milvus.mdx
index 6063edab1..e8408a74f 100644
--- a/docs/docs/providers/vector_io/inline_milvus.mdx
+++ b/docs/docs/providers/vector_io/inline_milvus.mdx
@@ -16,9 +16,9 @@ Please refer to the remote provider documentation.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `db_path` | `` | No | | |
-| `persistence` | `` | No | | Config for KV store backend (SQLite only for now) |
-| `consistency_level` | `` | No | Strong | The consistency level of the Milvus server |
+| `db_path` | `str` | No | | |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
+| `consistency_level` | `str` | No | Strong | The consistency level of the Milvus server |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_qdrant.mdx b/docs/docs/providers/vector_io/inline_qdrant.mdx
index 057d96761..8f6155732 100644
--- a/docs/docs/providers/vector_io/inline_qdrant.mdx
+++ b/docs/docs/providers/vector_io/inline_qdrant.mdx
@@ -97,8 +97,8 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `path` | `` | No | | |
-| `persistence` | `` | No | | |
+| `path` | `str` | No | | |
+| `persistence` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_sqlite-vec.mdx b/docs/docs/providers/vector_io/inline_sqlite-vec.mdx
index 45631dff3..b63d9db72 100644
--- a/docs/docs/providers/vector_io/inline_sqlite-vec.mdx
+++ b/docs/docs/providers/vector_io/inline_sqlite-vec.mdx
@@ -407,8 +407,8 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `db_path` | `` | No | | Path to the SQLite database file |
-| `persistence` | `` | No | | Config for KV store backend (SQLite only for now) |
+| `db_path` | `str` | No | | Path to the SQLite database file |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/inline_sqlite_vec.mdx b/docs/docs/providers/vector_io/inline_sqlite_vec.mdx
index 67cbd0021..a25ff1b28 100644
--- a/docs/docs/providers/vector_io/inline_sqlite_vec.mdx
+++ b/docs/docs/providers/vector_io/inline_sqlite_vec.mdx
@@ -16,8 +16,8 @@ Please refer to the sqlite-vec provider documentation.
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `db_path` | `` | No | | Path to the SQLite database file |
-| `persistence` | `` | No | | Config for KV store backend (SQLite only for now) |
+| `db_path` | `str` | No | | Path to the SQLite database file |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/remote_chromadb.mdx b/docs/docs/providers/vector_io/remote_chromadb.mdx
index 2aee3eeca..970f4420f 100644
--- a/docs/docs/providers/vector_io/remote_chromadb.mdx
+++ b/docs/docs/providers/vector_io/remote_chromadb.mdx
@@ -78,7 +78,7 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `url` | `str \| None` | No | | |
-| `persistence` | `` | No | | Config for KV store backend |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/remote_milvus.mdx b/docs/docs/providers/vector_io/remote_milvus.mdx
index bf9935d61..3e8ae71cf 100644
--- a/docs/docs/providers/vector_io/remote_milvus.mdx
+++ b/docs/docs/providers/vector_io/remote_milvus.mdx
@@ -405,10 +405,10 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
-| `uri` | `` | No | | The URI of the Milvus server |
+| `uri` | `str` | No | | The URI of the Milvus server |
| `token` | `str \| None` | No | | The token of the Milvus server |
-| `consistency_level` | `` | No | Strong | The consistency level of the Milvus server |
-| `persistence` | `` | No | | Config for KV store backend |
+| `consistency_level` | `str` | No | Strong | The consistency level of the Milvus server |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend |
| `config` | `dict` | No | `{}` | This configuration allows additional fields to be passed through to the underlying Milvus client. See the [Milvus](https://milvus.io/docs/install-overview.md) documentation for more details about Milvus in general. |
:::note
diff --git a/docs/docs/providers/vector_io/remote_pgvector.mdx b/docs/docs/providers/vector_io/remote_pgvector.mdx
index cb70f35d1..cd69e2b2f 100644
--- a/docs/docs/providers/vector_io/remote_pgvector.mdx
+++ b/docs/docs/providers/vector_io/remote_pgvector.mdx
@@ -218,7 +218,7 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de
| `db` | `str \| None` | No | postgres | |
| `user` | `str \| None` | No | postgres | |
| `password` | `str \| None` | No | mysecretpassword | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/remote_qdrant.mdx b/docs/docs/providers/vector_io/remote_qdrant.mdx
index dff9642b5..9b5117bcb 100644
--- a/docs/docs/providers/vector_io/remote_qdrant.mdx
+++ b/docs/docs/providers/vector_io/remote_qdrant.mdx
@@ -19,14 +19,14 @@ Please refer to the inline provider documentation.
| `location` | `str \| None` | No | | |
| `url` | `str \| None` | No | | |
| `port` | `int \| None` | No | 6333 | |
-| `grpc_port` | `` | No | 6334 | |
-| `prefer_grpc` | `` | No | False | |
+| `grpc_port` | `int` | No | 6334 | |
+| `prefer_grpc` | `bool` | No | False | |
| `https` | `bool \| None` | No | | |
| `api_key` | `str \| None` | No | | |
| `prefix` | `str \| None` | No | | |
| `timeout` | `int \| None` | No | | |
| `host` | `str \| None` | No | | |
-| `persistence` | `` | No | | |
+| `persistence` | `KVStoreReference` | No | | |
## Sample Configuration
diff --git a/docs/docs/providers/vector_io/remote_weaviate.mdx b/docs/docs/providers/vector_io/remote_weaviate.mdx
index b809bed2e..7a29d0d48 100644
--- a/docs/docs/providers/vector_io/remote_weaviate.mdx
+++ b/docs/docs/providers/vector_io/remote_weaviate.mdx
@@ -75,7 +75,7 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more
|-------|------|----------|---------|-------------|
| `weaviate_api_key` | `str \| None` | No | | The API key for the Weaviate instance |
| `weaviate_cluster_url` | `str \| None` | No | localhost:8080 | The URL of the Weaviate cluster |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |
## Sample Configuration
diff --git a/docs/package-lock.json b/docs/package-lock.json
index 9a435846f..2a548914c 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -10712,12 +10712,6 @@
"integrity": "sha512-QMUezzXWII9EV5aTFXW1UBVUO77wYPpjqIF8/AviUCThNeSYZykpoTixUeaNNBwmCev0AMDWMAni+f8Hxb1IFw==",
"license": "Unlicense"
},
- "node_modules/fs.realpath": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
- "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
- "license": "ISC"
- },
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
@@ -10821,21 +10815,20 @@
"license": "ISC"
},
"node_modules/glob": {
- "version": "7.2.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
- "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
- "deprecated": "Glob versions prior to v9 are no longer supported",
+ "version": "10.5.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
+ "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
"license": "ISC",
"dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.1.1",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
+ "foreground-child": "^3.1.0",
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
},
- "engines": {
- "node": "*"
+ "bin": {
+ "glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -10859,26 +10852,19 @@
"integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==",
"license": "BSD-2-Clause"
},
- "node_modules/glob/node_modules/brace-expansion": {
- "version": "1.1.12",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
- "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
- "license": "MIT",
- "dependencies": {
- "balanced-match": "^1.0.0",
- "concat-map": "0.0.1"
- }
- },
"node_modules/glob/node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"license": "ISC",
"dependencies": {
- "brace-expansion": "^1.1.7"
+ "brace-expansion": "^2.0.1"
},
"engines": {
- "node": "*"
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/global-dirs": {
@@ -11792,17 +11778,6 @@
"node": ">=12"
}
},
- "node_modules/inflight": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
- "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
- "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
- "license": "ISC",
- "dependencies": {
- "once": "^1.3.0",
- "wrappy": "1"
- }
- },
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
@@ -15570,15 +15545,6 @@
"node": ">= 0.8"
}
},
- "node_modules/once": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
- "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
- "license": "ISC",
- "dependencies": {
- "wrappy": "1"
- }
- },
"node_modules/onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
@@ -15955,15 +15921,6 @@
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
}
},
- "node_modules/path-is-absolute": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
- "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
- "license": "MIT",
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/path-is-inside": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
@@ -20038,41 +19995,6 @@
"node": ">= 6"
}
},
- "node_modules/sucrase/node_modules/glob": {
- "version": "10.4.5",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
- "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
- "license": "ISC",
- "dependencies": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^3.1.2",
- "minimatch": "^9.0.4",
- "minipass": "^7.1.2",
- "package-json-from-dist": "^1.0.0",
- "path-scurry": "^1.11.1"
- },
- "bin": {
- "glob": "dist/esm/bin.mjs"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/sucrase/node_modules/minimatch": {
- "version": "9.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
- "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
- "license": "ISC",
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
@@ -21620,12 +21542,6 @@
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
- "node_modules/wrappy": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
- "license": "ISC"
- },
"node_modules/write-file-atomic": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
diff --git a/docs/package.json b/docs/package.json
index d435c65a9..ca4d02ca1 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -31,6 +31,9 @@
"react-dom": "^19.0.0",
"remark-code-import": "^1.2.0"
},
+ "overrides": {
+ "glob": "^10.5.0"
+ },
"browserslist": {
"production": [
">0.5%",
diff --git a/docs/static/deprecated-llama-stack-spec.yaml b/docs/static/deprecated-llama-stack-spec.yaml
index 558fbbf6c..aae0fbe44 100644
--- a/docs/static/deprecated-llama-stack-spec.yaml
+++ b/docs/static/deprecated-llama-stack-spec.yaml
@@ -193,7 +193,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/RegisterScoringFunctionRequestLoose'
+ $ref: '#/components/schemas/RegisterScoringFunctionRequest'
required: true
deprecated: true
/v1/scoring-functions/{scoring_fn_id}:
@@ -549,7 +549,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/RegisterDatasetRequestLoose'
+ $ref: '#/components/schemas/RegisterDatasetRequest'
required: true
deprecated: true
/v1beta/datasets/{dataset_id}:
@@ -3572,9 +3572,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -3974,6 +3975,11 @@ components:
anyOf:
- type: string
- type: 'null'
+ parallel_tool_calls:
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -4100,9 +4106,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -6704,9 +6711,21 @@ components:
title: Object
default: vector_store.file
attributes:
- additionalProperties: true
+ additionalProperties:
+ anyOf:
+ - type: string
+ maxLength: 512
+ - type: number
+ - type: boolean
+ title: string | number | boolean
+ propertyNames:
+ type: string
+ maxLength: 64
type: object
+ maxProperties: 16
title: Attributes
+ description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers.
+ x-oaiTypeLabel: map
chunking_strategy:
oneOf:
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
@@ -7435,6 +7454,14 @@ components:
- scores
title: EvaluateResponse
description: The response from an evaluation.
+ RunEvalRequest:
+ properties:
+ benchmark_config:
+ $ref: '#/components/schemas/BenchmarkConfig'
+ type: object
+ required:
+ - benchmark_config
+ title: RunEvalRequest
Job:
properties:
job_id:
@@ -8018,6 +8045,67 @@ components:
- $ref: '#/components/schemas/CompletionInputType'
title: CompletionInputType
title: StringType | ... (9 variants)
+ RegisterScoringFunctionRequest:
+ properties:
+ scoring_fn_id:
+ type: string
+ title: Scoring Fn Id
+ description:
+ type: string
+ title: Description
+ return_type:
+ anyOf:
+ - $ref: '#/components/schemas/StringType'
+ title: StringType
+ - $ref: '#/components/schemas/NumberType'
+ title: NumberType
+ - $ref: '#/components/schemas/BooleanType'
+ title: BooleanType
+ - $ref: '#/components/schemas/ArrayType'
+ title: ArrayType
+ - $ref: '#/components/schemas/ObjectType'
+ title: ObjectType
+ - $ref: '#/components/schemas/JsonType'
+ title: JsonType
+ - $ref: '#/components/schemas/UnionType'
+ title: UnionType
+ - $ref: '#/components/schemas/ChatCompletionInputType'
+ title: ChatCompletionInputType
+ - $ref: '#/components/schemas/CompletionInputType'
+ title: CompletionInputType
+ title: StringType | ... (9 variants)
+ provider_scoring_fn_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ provider_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ params:
+ anyOf:
+ - oneOf:
+ - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams'
+ title: LLMAsJudgeScoringFnParams
+ - $ref: '#/components/schemas/RegexParserScoringFnParams'
+ title: RegexParserScoringFnParams
+ - $ref: '#/components/schemas/BasicScoringFnParams'
+ title: BasicScoringFnParams
+ discriminator:
+ propertyName: type
+ mapping:
+ basic: '#/components/schemas/BasicScoringFnParams'
+ llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams'
+ regex_parser: '#/components/schemas/RegexParserScoringFnParams'
+ title: LLMAsJudgeScoringFnParams | RegexParserScoringFnParams | BasicScoringFnParams
+ - type: 'null'
+ title: Params
+ type: object
+ required:
+ - scoring_fn_id
+ - description
+ - return_type
+ title: RegisterScoringFunctionRequest
RegisterShieldRequest:
properties:
shield_id:
@@ -8076,6 +8164,31 @@ components:
- $ref: '#/components/schemas/RowsDataSource'
title: RowsDataSource
title: URIDataSource | RowsDataSource
+ RegisterDatasetRequest:
+ properties:
+ purpose:
+ $ref: '#/components/schemas/DatasetPurpose'
+ source:
+ anyOf:
+ - $ref: '#/components/schemas/URIDataSource'
+ title: URIDataSource
+ - $ref: '#/components/schemas/RowsDataSource'
+ title: RowsDataSource
+ title: URIDataSource | RowsDataSource
+ metadata:
+ anyOf:
+ - additionalProperties: true
+ type: object
+ - type: 'null'
+ dataset_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ type: object
+ required:
+ - purpose
+ - source
+ title: RegisterDatasetRequest
RegisterBenchmarkRequest:
properties:
benchmark_id:
@@ -8812,41 +8925,6 @@ components:
required:
- reasoning_tokens
title: OutputTokensDetails
- RegisterDatasetRequestLoose:
- properties:
- purpose:
- title: Purpose
- source:
- title: Source
- metadata:
- title: Metadata
- dataset_id:
- title: Dataset Id
- type: object
- required:
- - purpose
- - source
- title: RegisterDatasetRequestLoose
- RegisterScoringFunctionRequestLoose:
- properties:
- scoring_fn_id:
- title: Scoring Fn Id
- description:
- title: Description
- return_type:
- title: Return Type
- provider_scoring_fn_id:
- title: Provider Scoring Fn Id
- provider_id:
- title: Provider Id
- params:
- title: Params
- type: object
- required:
- - scoring_fn_id
- - description
- - return_type
- title: RegisterScoringFunctionRequestLoose
SearchRankingOptions:
properties:
ranker:
diff --git a/docs/static/experimental-llama-stack-spec.yaml b/docs/static/experimental-llama-stack-spec.yaml
index 79a18161b..0fcbe8ba1 100644
--- a/docs/static/experimental-llama-stack-spec.yaml
+++ b/docs/static/experimental-llama-stack-spec.yaml
@@ -300,7 +300,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/BenchmarkConfig'
+ $ref: '#/components/schemas/RunEvalRequest'
required: true
/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}:
get:
@@ -3297,9 +3297,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -3696,9 +3697,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -6093,9 +6095,21 @@ components:
title: Object
default: vector_store.file
attributes:
- additionalProperties: true
+ additionalProperties:
+ anyOf:
+ - type: string
+ maxLength: 512
+ - type: number
+ - type: boolean
+ title: string | number | boolean
+ propertyNames:
+ type: string
+ maxLength: 64
type: object
+ maxProperties: 16
title: Attributes
+ description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers.
+ x-oaiTypeLabel: map
chunking_strategy:
oneOf:
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
@@ -6745,6 +6759,14 @@ components:
- scores
title: EvaluateResponse
description: The response from an evaluation.
+ RunEvalRequest:
+ properties:
+ benchmark_config:
+ $ref: '#/components/schemas/BenchmarkConfig'
+ type: object
+ required:
+ - benchmark_config
+ title: RunEvalRequest
Job:
properties:
job_id:
diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml
index 3aebf95cb..403e01dd1 100644
--- a/docs/static/llama-stack-spec.yaml
+++ b/docs/static/llama-stack-spec.yaml
@@ -5760,9 +5760,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -6162,6 +6163,11 @@ components:
anyOf:
- type: string
- type: 'null'
+ parallel_tool_calls:
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -6288,9 +6294,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -8892,9 +8899,21 @@ components:
title: Object
default: vector_store.file
attributes:
- additionalProperties: true
+ additionalProperties:
+ anyOf:
+ - type: string
+ maxLength: 512
+ - type: number
+ - type: boolean
+ title: string | number | boolean
+ propertyNames:
+ type: string
+ maxLength: 64
type: object
+ maxProperties: 16
title: Attributes
+ description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers.
+ x-oaiTypeLabel: map
chunking_strategy:
oneOf:
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
diff --git a/docs/static/stainless-llama-stack-spec.yaml b/docs/static/stainless-llama-stack-spec.yaml
index 7e81dbd60..e658a6237 100644
--- a/docs/static/stainless-llama-stack-spec.yaml
+++ b/docs/static/stainless-llama-stack-spec.yaml
@@ -1820,7 +1820,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/RegisterScoringFunctionRequestLoose'
+ $ref: '#/components/schemas/RegisterScoringFunctionRequest'
required: true
deprecated: true
/v1/scoring-functions/{scoring_fn_id}:
@@ -3310,7 +3310,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/RegisterDatasetRequestLoose'
+ $ref: '#/components/schemas/RegisterDatasetRequest'
required: true
deprecated: true
/v1beta/datasets/{dataset_id}:
@@ -3567,7 +3567,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/BenchmarkConfig'
+ $ref: '#/components/schemas/RunEvalRequest'
required: true
/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}:
get:
@@ -6739,9 +6739,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -7141,6 +7142,11 @@ components:
anyOf:
- type: string
- type: 'null'
+ parallel_tool_calls:
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -7267,9 +7273,10 @@ components:
type: array
title: Output
parallel_tool_calls:
- type: boolean
- title: Parallel Tool Calls
- default: false
+ anyOf:
+ - type: boolean
+ - type: 'null'
+ default: true
previous_response_id:
anyOf:
- type: string
@@ -9871,9 +9878,21 @@ components:
title: Object
default: vector_store.file
attributes:
- additionalProperties: true
+ additionalProperties:
+ anyOf:
+ - type: string
+ maxLength: 512
+ - type: number
+ - type: boolean
+ title: string | number | boolean
+ propertyNames:
+ type: string
+ maxLength: 64
type: object
+ maxProperties: 16
title: Attributes
+ description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers.
+ x-oaiTypeLabel: map
chunking_strategy:
oneOf:
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
@@ -10602,6 +10621,14 @@ components:
- scores
title: EvaluateResponse
description: The response from an evaluation.
+ RunEvalRequest:
+ properties:
+ benchmark_config:
+ $ref: '#/components/schemas/BenchmarkConfig'
+ type: object
+ required:
+ - benchmark_config
+ title: RunEvalRequest
Job:
properties:
job_id:
@@ -11185,6 +11212,67 @@ components:
- $ref: '#/components/schemas/CompletionInputType'
title: CompletionInputType
title: StringType | ... (9 variants)
+ RegisterScoringFunctionRequest:
+ properties:
+ scoring_fn_id:
+ type: string
+ title: Scoring Fn Id
+ description:
+ type: string
+ title: Description
+ return_type:
+ anyOf:
+ - $ref: '#/components/schemas/StringType'
+ title: StringType
+ - $ref: '#/components/schemas/NumberType'
+ title: NumberType
+ - $ref: '#/components/schemas/BooleanType'
+ title: BooleanType
+ - $ref: '#/components/schemas/ArrayType'
+ title: ArrayType
+ - $ref: '#/components/schemas/ObjectType'
+ title: ObjectType
+ - $ref: '#/components/schemas/JsonType'
+ title: JsonType
+ - $ref: '#/components/schemas/UnionType'
+ title: UnionType
+ - $ref: '#/components/schemas/ChatCompletionInputType'
+ title: ChatCompletionInputType
+ - $ref: '#/components/schemas/CompletionInputType'
+ title: CompletionInputType
+ title: StringType | ... (9 variants)
+ provider_scoring_fn_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ provider_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ params:
+ anyOf:
+ - oneOf:
+ - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams'
+ title: LLMAsJudgeScoringFnParams
+ - $ref: '#/components/schemas/RegexParserScoringFnParams'
+ title: RegexParserScoringFnParams
+ - $ref: '#/components/schemas/BasicScoringFnParams'
+ title: BasicScoringFnParams
+ discriminator:
+ propertyName: type
+ mapping:
+ basic: '#/components/schemas/BasicScoringFnParams'
+ llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams'
+ regex_parser: '#/components/schemas/RegexParserScoringFnParams'
+ title: LLMAsJudgeScoringFnParams | RegexParserScoringFnParams | BasicScoringFnParams
+ - type: 'null'
+ title: Params
+ type: object
+ required:
+ - scoring_fn_id
+ - description
+ - return_type
+ title: RegisterScoringFunctionRequest
RegisterShieldRequest:
properties:
shield_id:
@@ -11243,6 +11331,31 @@ components:
- $ref: '#/components/schemas/RowsDataSource'
title: RowsDataSource
title: URIDataSource | RowsDataSource
+ RegisterDatasetRequest:
+ properties:
+ purpose:
+ $ref: '#/components/schemas/DatasetPurpose'
+ source:
+ anyOf:
+ - $ref: '#/components/schemas/URIDataSource'
+ title: URIDataSource
+ - $ref: '#/components/schemas/RowsDataSource'
+ title: RowsDataSource
+ title: URIDataSource | RowsDataSource
+ metadata:
+ anyOf:
+ - additionalProperties: true
+ type: object
+ - type: 'null'
+ dataset_id:
+ anyOf:
+ - type: string
+ - type: 'null'
+ type: object
+ required:
+ - purpose
+ - source
+ title: RegisterDatasetRequest
RegisterBenchmarkRequest:
properties:
benchmark_id:
@@ -11979,41 +12092,6 @@ components:
required:
- reasoning_tokens
title: OutputTokensDetails
- RegisterDatasetRequestLoose:
- properties:
- purpose:
- title: Purpose
- source:
- title: Source
- metadata:
- title: Metadata
- dataset_id:
- title: Dataset Id
- type: object
- required:
- - purpose
- - source
- title: RegisterDatasetRequestLoose
- RegisterScoringFunctionRequestLoose:
- properties:
- scoring_fn_id:
- title: Scoring Fn Id
- description:
- title: Description
- return_type:
- title: Return Type
- provider_scoring_fn_id:
- title: Provider Scoring Fn Id
- provider_id:
- title: Provider Id
- params:
- title: Params
- type: object
- required:
- - scoring_fn_id
- - description
- - return_type
- title: RegisterScoringFunctionRequestLoose
SearchRankingOptions:
properties:
ranker:
diff --git a/pyproject.toml b/pyproject.toml
index bdf8309ad..3e16dc08f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,7 +38,6 @@ dependencies = [
"pyjwt[crypto]>=2.10.0", # Pull crypto to support RS256 for jwt. Requires 2.10.0+ for ssl_context support.
"pydantic>=2.11.9",
"rich",
- "starlette",
"termcolor",
"tiktoken",
"pillow",
@@ -50,7 +49,6 @@ dependencies = [
"aiosqlite>=0.21.0", # server - for metadata store
"asyncpg", # for metadata store
"sqlalchemy[asyncio]>=2.0.41", # server - for conversations
- "pyyaml>=6.0.2",
"starlette>=0.49.1",
]
@@ -358,6 +356,10 @@ exclude = [
module = [
"yaml",
"fire",
+ "redis.asyncio",
+ "psycopg2",
+ "psycopg2.extras",
+ "psycopg2.extensions",
"torchtune.*",
"fairscale.*",
"torchvision.*",
diff --git a/scripts/docker.sh b/scripts/docker.sh
index b56df8c03..3b2db5ca7 100755
--- a/scripts/docker.sh
+++ b/scripts/docker.sh
@@ -287,9 +287,9 @@ start_container() {
# On macOS/Windows, use host.docker.internal to reach host from container
# On Linux with --network host, use localhost
if [[ "$(uname)" == "Darwin" ]] || [[ "$(uname)" == *"MINGW"* ]]; then
- OLLAMA_URL="${OLLAMA_URL:-http://host.docker.internal:11434}"
+ OLLAMA_URL="${OLLAMA_URL:-http://host.docker.internal:11434/v1}"
else
- OLLAMA_URL="${OLLAMA_URL:-http://localhost:11434}"
+ OLLAMA_URL="${OLLAMA_URL:-http://localhost:11434/v1}"
fi
DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OLLAMA_URL=$OLLAMA_URL"
diff --git a/scripts/get_setup_env.py b/scripts/get_setup_env.py
index fad601e76..755cfefea 100755
--- a/scripts/get_setup_env.py
+++ b/scripts/get_setup_env.py
@@ -16,16 +16,16 @@ import sys
from tests.integration.suites import SETUP_DEFINITIONS, SUITE_DEFINITIONS
-def get_setup_env_vars(setup_name, suite_name=None):
+def get_setup_config(setup_name, suite_name=None):
"""
- Get environment variables for a setup, with optional suite default fallback.
+ Get full configuration (env vars + defaults) for a setup.
Args:
setup_name: Name of the setup (e.g., 'ollama', 'gpt')
suite_name: Optional suite name to get default setup if setup_name is None
Returns:
- Dictionary of environment variables
+ Dictionary with 'env' and 'defaults' keys
"""
# If no setup specified, try to get default from suite
if not setup_name and suite_name:
@@ -34,7 +34,7 @@ def get_setup_env_vars(setup_name, suite_name=None):
setup_name = suite.default_setup
if not setup_name:
- return {}
+ return {"env": {}, "defaults": {}}
setup = SETUP_DEFINITIONS.get(setup_name)
if not setup:
@@ -44,27 +44,31 @@ def get_setup_env_vars(setup_name, suite_name=None):
)
sys.exit(1)
- return setup.env
+ return {"env": setup.env, "defaults": setup.defaults}
def main():
- parser = argparse.ArgumentParser(description="Extract environment variables from a test setup")
+ parser = argparse.ArgumentParser(description="Extract environment variables and defaults from a test setup")
parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)")
parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided")
parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)")
args = parser.parse_args()
- env_vars = get_setup_env_vars(args.setup, args.suite)
+ config = get_setup_config(args.setup, args.suite)
if args.format == "bash":
- # Output as bash export statements
- for key, value in env_vars.items():
+ # Output env vars as bash export statements
+ for key, value in config["env"].items():
print(f"export {key}='{value}'")
+ # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
+ for key, value in config["defaults"].items():
+ env_key = f"LLAMA_STACK_TEST_{key.upper()}"
+ print(f"export {env_key}='{value}'")
elif args.format == "json":
import json
- print(json.dumps(env_vars))
+ print(json.dumps(config))
if __name__ == "__main__":
diff --git a/scripts/install.sh b/scripts/install.sh
index 5e4939767..7fe1d3243 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -640,7 +640,7 @@ cmd=( run -d "${PLATFORM_OPTS[@]}" --name llama-stack \
--network llama-net \
-p "${PORT}:${PORT}" \
"${server_env_opts[@]}" \
- -e OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}" \
+ -e OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}/v1" \
"${SERVER_IMAGE}" --port "${PORT}")
log "š¦ Starting Llama Stack..."
diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh
index 8b0002125..2adef892d 100755
--- a/scripts/integration-tests.sh
+++ b/scripts/integration-tests.sh
@@ -20,6 +20,7 @@ TEST_PATTERN=""
INFERENCE_MODE="replay"
EXTRA_PARAMS=""
COLLECT_ONLY=false
+TYPESCRIPT_ONLY=false
# Function to display usage
usage() {
@@ -34,6 +35,7 @@ Options:
--subdirs STRING Comma-separated list of test subdirectories to run (overrides suite)
--pattern STRING Regex pattern to pass to pytest -k
--collect-only Collect tests only without running them (skips server startup)
+ --typescript-only Skip Python tests and run only TypeScript client tests
--help Show this help message
Suites are defined in tests/integration/suites.py and define which tests to run.
@@ -90,6 +92,10 @@ while [[ $# -gt 0 ]]; do
COLLECT_ONLY=true
shift
;;
+ --typescript-only)
+ TYPESCRIPT_ONLY=true
+ shift
+ ;;
--help)
usage
exit 0
@@ -181,6 +187,10 @@ echo "$SETUP_ENV"
eval "$SETUP_ENV"
echo ""
+# Export suite and setup names for TypeScript tests
+export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
+export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
+
ROOT_DIR="$THIS_DIR/.."
cd $ROOT_DIR
@@ -212,6 +222,71 @@ find_available_port() {
return 1
}
+run_client_ts_tests() {
+ if ! command -v npm &>/dev/null; then
+ echo "npm could not be found; ensure Node.js is installed"
+ return 1
+ fi
+
+ pushd tests/integration/client-typescript >/dev/null
+
+ # Determine if TS_CLIENT_PATH is a directory path or an npm version
+ if [[ -d "$TS_CLIENT_PATH" ]]; then
+ # It's a directory path - use local checkout
+ if [[ ! -f "$TS_CLIENT_PATH/package.json" ]]; then
+ echo "Error: $TS_CLIENT_PATH exists but doesn't look like llama-stack-client-typescript (no package.json)"
+ popd >/dev/null
+ return 1
+ fi
+ echo "Using local llama-stack-client-typescript from: $TS_CLIENT_PATH"
+
+ # Build the TypeScript client first
+ echo "Building TypeScript client..."
+ pushd "$TS_CLIENT_PATH" >/dev/null
+ npm install --silent
+ npm run build --silent
+ popd >/dev/null
+
+ # Install other dependencies first
+ if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
+ npm ci --silent
+ else
+ npm install --silent
+ fi
+
+ # Then install the client from local directory
+ echo "Installing llama-stack-client from: $TS_CLIENT_PATH"
+ npm install "$TS_CLIENT_PATH" --silent
+ else
+ # It's an npm version specifier - install from npm
+ echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm"
+ if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
+ npm ci --silent
+ npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
+ else
+ npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
+ fi
+ fi
+
+ # Verify installation
+ echo "Verifying llama-stack-client installation..."
+ if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
+ echo "✅ llama-stack-client successfully installed"
+ npm list llama-stack-client
+ else
+ echo "❌ llama-stack-client not found in node_modules"
+ echo "Installed packages:"
+ npm list --depth=0
+ popd >/dev/null
+ return 1
+ fi
+
+ echo "Running TypeScript tests for suite $TEST_SUITE (setup $TEST_SETUP)"
+ npm test
+
+ popd >/dev/null
+}
+
# Start Llama Stack Server if needed
if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
# Find an available port for the server
@@ -221,6 +296,7 @@ if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
exit 1
fi
export LLAMA_STACK_PORT
+ export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT"
echo "Will use port: $LLAMA_STACK_PORT"
stop_server() {
@@ -298,6 +374,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
exit 1
fi
export LLAMA_STACK_PORT
+ export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT"
echo "Will use port: $LLAMA_STACK_PORT"
echo "=== Building Docker Image for distribution: $DISTRO ==="
@@ -473,16 +550,23 @@ if [[ -n "$STACK_CONFIG" ]]; then
STACK_CONFIG_ARG="--stack-config=$STACK_CONFIG"
fi
-pytest -s -v $PYTEST_TARGET \
- $STACK_CONFIG_ARG \
- --inference-mode="$INFERENCE_MODE" \
- -k "$PYTEST_PATTERN" \
- $EXTRA_PARAMS \
- --color=yes \
- --embedding-model=sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \
- --color=yes $EXTRA_PARAMS \
- --capture=tee-sys
-exit_code=$?
+# Run Python tests unless typescript-only mode
+if [[ "$TYPESCRIPT_ONLY" == "false" ]]; then
+ pytest -s -v $PYTEST_TARGET \
+ $STACK_CONFIG_ARG \
+ --inference-mode="$INFERENCE_MODE" \
+ -k "$PYTEST_PATTERN" \
+ $EXTRA_PARAMS \
+ --color=yes \
+ --embedding-model=sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \
+ --color=yes $EXTRA_PARAMS \
+ --capture=tee-sys
+ exit_code=$?
+else
+ echo "Skipping Python tests (--typescript-only mode)"
+ exit_code=0
+fi
+
set +x
set -e
@@ -506,5 +590,10 @@ else
exit 1
fi
+# Run TypeScript client tests if TS_CLIENT_PATH is set
+if [[ $exit_code -eq 0 && -n "${TS_CLIENT_PATH:-}" && "${LLAMA_STACK_TEST_STACK_CONFIG_TYPE:-}" == "server" ]]; then
+ run_client_ts_tests
+fi
+
echo ""
echo "=== Integration Tests Complete ==="
diff --git a/scripts/openapi_generator/__init__.py b/scripts/openapi_generator/__init__.py
index 7f6aaa1d1..834836f76 100644
--- a/scripts/openapi_generator/__init__.py
+++ b/scripts/openapi_generator/__init__.py
@@ -11,6 +11,13 @@ This module provides functionality to generate OpenAPI specifications
from FastAPI applications.
"""
-from .main import generate_openapi_spec, main
-
__all__ = ["generate_openapi_spec", "main"]
+
+
+def __getattr__(name: str):
+ if name in {"generate_openapi_spec", "main"}:
+ from .main import generate_openapi_spec as _gos
+ from .main import main as _main
+
+ return {"generate_openapi_spec": _gos, "main": _main}[name]
+ raise AttributeError(name)
diff --git a/scripts/openapi_generator/endpoints.py b/scripts/openapi_generator/endpoints.py
index 39086f47f..85203cb71 100644
--- a/scripts/openapi_generator/endpoints.py
+++ b/scripts/openapi_generator/endpoints.py
@@ -15,6 +15,7 @@ import typing
from typing import Annotated, Any, get_args, get_origin
from fastapi import FastAPI
+from fastapi.params import Body as FastAPIBody
from pydantic import Field, create_model
from llama_stack.log import get_logger
@@ -26,6 +27,8 @@ from .state import _extra_body_fields, register_dynamic_model
logger = get_logger(name=__name__, category="core")
+type QueryParameter = tuple[str, type, Any, bool]
+
def _to_pascal_case(segment: str) -> str:
tokens = re.findall(r"[A-Za-z]+|\d+", segment)
@@ -75,12 +78,12 @@ def _create_endpoint_with_request_model(
return endpoint
-def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_any: bool = False) -> dict[str, tuple]:
+def _build_field_definitions(query_parameters: list[QueryParameter], use_any: bool = False) -> dict[str, tuple]:
"""Build field definitions for a Pydantic model from query parameters."""
from typing import Any
field_definitions = {}
- for param_name, param_type, default_value in query_parameters:
+ for param_name, param_type, default_value, _ in query_parameters:
if use_any:
field_definitions[param_name] = (Any, ... if default_value is inspect.Parameter.empty else default_value)
continue
@@ -108,10 +111,10 @@ def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_
field_definitions[param_name] = (Any, ... if default_value is inspect.Parameter.empty else default_value)
# Ensure all parameters are included
- expected_params = {name for name, _, _ in query_parameters}
+ expected_params = {name for name, _, _, _ in query_parameters}
missing = expected_params - set(field_definitions.keys())
if missing:
- for param_name, _, default_value in query_parameters:
+ for param_name, _, default_value, _ in query_parameters:
if param_name in missing:
field_definitions[param_name] = (
Any,
@@ -126,7 +129,7 @@ def _create_dynamic_request_model(
webmethod,
method_name: str,
http_method: str,
- query_parameters: list[tuple[str, type, Any]],
+ query_parameters: list[QueryParameter],
use_any: bool = False,
variant_suffix: str | None = None,
) -> type | None:
@@ -143,12 +146,12 @@ def _create_dynamic_request_model(
def _build_signature_params(
- query_parameters: list[tuple[str, type, Any]],
+ query_parameters: list[QueryParameter],
) -> tuple[list[inspect.Parameter], dict[str, type]]:
"""Build signature parameters and annotations from query parameters."""
signature_params = []
param_annotations = {}
- for param_name, param_type, default_value in query_parameters:
+ for param_name, param_type, default_value, _ in query_parameters:
param_annotations[param_name] = param_type
signature_params.append(
inspect.Parameter(
@@ -219,6 +222,19 @@ def _is_extra_body_field(metadata_item: Any) -> bool:
return isinstance(metadata_item, ExtraBodyField)
+def _should_embed_parameter(param_type: Any) -> bool:
+ """Determine whether a parameter should be embedded (wrapped) in the request body."""
+ if get_origin(param_type) is Annotated:
+ args = get_args(param_type)
+ metadata = args[1:] if len(args) > 1 else []
+ for metadata_item in metadata:
+ if isinstance(metadata_item, FastAPIBody):
+ # FastAPI treats embed=None as False, so default to False when unset.
+ return bool(metadata_item.embed)
+ # Unannotated parameters default to embed=True through create_dynamic_typed_route.
+ return True
+
+
def _is_async_iterator_type(type_obj: Any) -> bool:
"""Check if a type is AsyncIterator or AsyncIterable."""
from collections.abc import AsyncIterable, AsyncIterator
@@ -282,7 +298,7 @@ def _find_models_for_endpoint(
Returns:
tuple: (request_model, response_model, query_parameters, file_form_params, streaming_response_model, response_schema_name)
- where query_parameters is a list of (name, type, default_value) tuples
+ where query_parameters is a list of (name, type, default_value, should_embed) tuples
and file_form_params is a list of inspect.Parameter objects for File()/Form() params
and streaming_response_model is the model for streaming responses (AsyncIterator content)
"""
@@ -299,7 +315,7 @@ def _find_models_for_endpoint(
# Find request model and collect all body parameters
request_model = None
- query_parameters = []
+ query_parameters: list[QueryParameter] = []
file_form_params = []
path_params = set()
extra_body_params = []
@@ -325,6 +341,7 @@ def _find_models_for_endpoint(
# Check if it's a File() or Form() parameter - these need special handling
param_type = param.annotation
+ param_should_embed = _should_embed_parameter(param_type)
if _is_file_or_form_param(param_type):
# File() and Form() parameters must be in the function signature directly
# They cannot be part of a Pydantic model
@@ -350,30 +367,14 @@ def _find_models_for_endpoint(
# Store as extra body parameter - exclude from request model
extra_body_params.append((param_name, base_type, extra_body_description))
continue
+ param_type = base_type
# Check if it's a Pydantic model (for POST/PUT requests)
if hasattr(param_type, "model_json_schema"):
- # Collect all body parameters including Pydantic models
- # We'll decide later whether to use a single model or create a combined one
- query_parameters.append((param_name, param_type, param.default))
- elif get_origin(param_type) is Annotated:
- # Handle Annotated types - get the base type
- args = get_args(param_type)
- if args and hasattr(args[0], "model_json_schema"):
- # Collect Pydantic models from Annotated types
- query_parameters.append((param_name, args[0], param.default))
- else:
- # Regular annotated parameter (but not File/Form, already handled above)
- query_parameters.append((param_name, param_type, param.default))
+ query_parameters.append((param_name, param_type, param.default, param_should_embed))
else:
- # This is likely a body parameter for POST/PUT or query parameter for GET
- # Store the parameter info for later use
- # Preserve inspect.Parameter.empty to distinguish "no default" from "default=None"
- default_value = param.default
-
- # Extract the base type from union types (e.g., str | None -> str)
- # Also make it safe for FastAPI to avoid forward reference issues
- query_parameters.append((param_name, param_type, default_value))
+ # Regular annotated parameter (but not File/Form, already handled above)
+ query_parameters.append((param_name, param_type, param.default, param_should_embed))
# Store extra body fields for later use in post-processing
# We'll store them when the endpoint is created, as we need the full path
@@ -385,8 +386,8 @@ def _find_models_for_endpoint(
# Otherwise, we'll create a combined request model from all parameters
# BUT: For GET requests, never create a request body - all parameters should be query parameters
if is_post_put and len(query_parameters) == 1:
- param_name, param_type, default_value = query_parameters[0]
- if hasattr(param_type, "model_json_schema"):
+ param_name, param_type, default_value, should_embed = query_parameters[0]
+ if hasattr(param_type, "model_json_schema") and not should_embed:
request_model = param_type
query_parameters = [] # Clear query_parameters so we use the single model
@@ -495,7 +496,7 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
if file_form_params and is_post_put:
signature_params = list(file_form_params)
param_annotations = {param.name: param.annotation for param in file_form_params}
- for param_name, param_type, default_value in query_parameters:
+ for param_name, param_type, default_value, _ in query_parameters:
signature_params.append(
inspect.Parameter(
param_name,
diff --git a/src/llama_stack/providers/utils/kvstore/__init__.py b/scripts/openapi_generator/stainless_config/__init__.py
similarity index 80%
rename from src/llama_stack/providers/utils/kvstore/__init__.py
rename to scripts/openapi_generator/stainless_config/__init__.py
index 470a75d2d..bf44f82ba 100644
--- a/src/llama_stack/providers/utils/kvstore/__init__.py
+++ b/scripts/openapi_generator/stainless_config/__init__.py
@@ -4,4 +4,4 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from .kvstore import * # noqa: F401, F403
+# Package marker for Stainless config generation.
diff --git a/scripts/openapi_generator/stainless_config/generate_config.py b/scripts/openapi_generator/stainless_config/generate_config.py
new file mode 100644
index 000000000..dabc2119f
--- /dev/null
+++ b/scripts/openapi_generator/stainless_config/generate_config.py
@@ -0,0 +1,821 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+HEADER = "# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json\n\n"
+
+SECTION_ORDER = [
+ "organization",
+ "security",
+ "security_schemes",
+ "targets",
+ "client_settings",
+ "environments",
+ "pagination",
+ "settings",
+ "openapi",
+ "readme",
+ "resources",
+]
+
+ORGANIZATION = {
+ "name": "llama-stack-client",
+ "docs": "https://llama-stack.readthedocs.io/en/latest/",
+ "contact": "llamastack@meta.com",
+}
+
+SECURITY = [{}, {"BearerAuth": []}]
+
+SECURITY_SCHEMES = {"BearerAuth": {"type": "http", "scheme": "bearer"}}
+
+TARGETS = {
+ "node": {
+ "package_name": "llama-stack-client",
+ "production_repo": "llamastack/llama-stack-client-typescript",
+ "publish": {"npm": False},
+ },
+ "python": {
+ "package_name": "llama_stack_client",
+ "production_repo": "llamastack/llama-stack-client-python",
+ "options": {"use_uv": True},
+ "publish": {"pypi": True},
+ "project_name": "llama_stack_client",
+ },
+ "kotlin": {
+ "reverse_domain": "com.llama_stack_client.api",
+ "production_repo": None,
+ "publish": {"maven": False},
+ },
+ "go": {
+ "package_name": "llama-stack-client",
+ "production_repo": "llamastack/llama-stack-client-go",
+ "options": {"enable_v2": True, "back_compat_use_shared_package": False},
+ },
+}
+
+CLIENT_SETTINGS = {
+ "default_env_prefix": "LLAMA_STACK_CLIENT",
+ "opts": {
+ "api_key": {
+ "type": "string",
+ "read_env": "LLAMA_STACK_CLIENT_API_KEY",
+ "auth": {"security_scheme": "BearerAuth"},
+ "nullable": True,
+ }
+ },
+}
+
+ENVIRONMENTS = {"production": "http://any-hosted-llama-stack.com"}
+
+PAGINATION = [
+ {
+ "name": "datasets_iterrows",
+ "type": "offset",
+ "request": {
+ "dataset_id": {"type": "string"},
+ "start_index": {
+ "type": "integer",
+ "x-stainless-pagination-property": {"purpose": "offset_count_param"},
+ },
+ "limit": {"type": "integer"},
+ },
+ "response": {
+ "data": {"type": "array", "items": {"type": "object"}},
+ "next_index": {
+ "type": "integer",
+ "x-stainless-pagination-property": {"purpose": "offset_count_start_field"},
+ },
+ },
+ },
+ {
+ "name": "openai_cursor_page",
+ "type": "cursor",
+ "request": {
+ "limit": {"type": "integer"},
+ "after": {
+ "type": "string",
+ "x-stainless-pagination-property": {"purpose": "next_cursor_param"},
+ },
+ },
+ "response": {
+ "data": {"type": "array", "items": {}},
+ "has_more": {"type": "boolean"},
+ "last_id": {
+ "type": "string",
+ "x-stainless-pagination-property": {"purpose": "next_cursor_field"},
+ },
+ },
+ },
+]
+
+SETTINGS = {
+ "license": "MIT",
+ "unwrap_response_fields": ["data"],
+ "file_header": "Copyright (c) Meta Platforms, Inc. and affiliates.\n"
+ "All rights reserved.\n"
+ "\n"
+ "This source code is licensed under the terms described in the "
+ "LICENSE file in\n"
+ "the root directory of this source tree.\n",
+}
+
+OPENAPI = {
+ "transformations": [
+ {
+ "command": "mergeObject",
+ "reason": "Better return_type using enum",
+ "args": {
+ "target": ["$.components.schemas"],
+ "object": {
+ "ReturnType": {
+ "additionalProperties": False,
+ "properties": {
+ "type": {
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "array",
+ "object",
+ "json",
+ "union",
+ "chat_completion_input",
+ "completion_input",
+ "agent_turn_input",
+ ]
+ }
+ },
+ "required": ["type"],
+ "type": "object",
+ }
+ },
+ },
+ },
+ {
+ "command": "replaceProperties",
+ "reason": "Replace return type properties with better model (see above)",
+ "args": {
+ "filter": {
+ "only": [
+ "$.components.schemas.ScoringFn.properties.return_type",
+ "$.components.schemas.RegisterScoringFunctionRequest.properties.return_type",
+ ]
+ },
+ "value": {"$ref": "#/components/schemas/ReturnType"},
+ },
+ },
+ {
+ "command": "oneOfToAnyOf",
+ "reason": "Prism (mock server) doesn't like one of our "
+ "requests as it technically matches multiple "
+ "variants",
+ },
+ ]
+}
+
+README = {
+ "example_requests": {
+ "default": {
+ "type": "request",
+ "endpoint": "post /v1/chat/completions",
+ "params": {},
+ },
+ "headline": {"type": "request", "endpoint": "get /v1/models", "params": {}},
+ "pagination": {
+ "type": "request",
+ "endpoint": "post /v1/chat/completions",
+ "params": {},
+ },
+ }
+}
+
+ALL_RESOURCES = {
+ "$shared": {
+ "models": {
+ "interleaved_content_item": "InterleavedContentItem",
+ "interleaved_content": "InterleavedContent",
+ "param_type": "ParamType",
+ "safety_violation": "SafetyViolation",
+ "sampling_params": "SamplingParams",
+ "scoring_result": "ScoringResult",
+ "system_message": "SystemMessage",
+ }
+ },
+ "toolgroups": {
+ "models": {
+ "tool_group": "ToolGroup",
+ "list_tool_groups_response": "ListToolGroupsResponse",
+ },
+ "methods": {
+ "register": "post /v1/toolgroups",
+ "get": "get /v1/toolgroups/{toolgroup_id}",
+ "list": "get /v1/toolgroups",
+ "unregister": "delete /v1/toolgroups/{toolgroup_id}",
+ },
+ },
+ "tools": {
+ "methods": {
+ "get": "get /v1/tools/{tool_name}",
+ "list": {"paginated": False, "endpoint": "get /v1/tools"},
+ }
+ },
+ "tool_runtime": {
+ "models": {
+ "tool_def": "ToolDef",
+ "tool_invocation_result": "ToolInvocationResult",
+ },
+ "methods": {
+ "list_tools": {
+ "paginated": False,
+ "endpoint": "get /v1/tool-runtime/list-tools",
+ },
+ "invoke_tool": "post /v1/tool-runtime/invoke",
+ },
+ },
+ "responses": {
+ "models": {
+ "response_object_stream": "OpenAIResponseObjectStream",
+ "response_object": "OpenAIResponseObject",
+ },
+ "methods": {
+ "create": {
+ "type": "http",
+ "streaming": {
+ "stream_event_model": "responses.response_object_stream",
+ "param_discriminator": "stream",
+ },
+ "endpoint": "post /v1/responses",
+ },
+ "retrieve": "get /v1/responses/{response_id}",
+ "list": {"type": "http", "endpoint": "get /v1/responses"},
+ "delete": {
+ "type": "http",
+ "endpoint": "delete /v1/responses/{response_id}",
+ },
+ },
+ "subresources": {
+ "input_items": {
+ "methods": {
+ "list": {
+ "type": "http",
+ "paginated": False,
+ "endpoint": "get /v1/responses/{response_id}/input_items",
+ }
+ }
+ }
+ },
+ },
+ "prompts": {
+ "models": {"prompt": "Prompt", "list_prompts_response": "ListPromptsResponse"},
+ "methods": {
+ "create": "post /v1/prompts",
+ "list": {"paginated": False, "endpoint": "get /v1/prompts"},
+ "retrieve": "get /v1/prompts/{prompt_id}",
+ "update": "post /v1/prompts/{prompt_id}",
+ "delete": "delete /v1/prompts/{prompt_id}",
+ "set_default_version": "post /v1/prompts/{prompt_id}/set-default-version",
+ },
+ "subresources": {
+ "versions": {
+ "methods": {
+ "list": {
+ "paginated": False,
+ "endpoint": "get /v1/prompts/{prompt_id}/versions",
+ }
+ }
+ }
+ },
+ },
+ "conversations": {
+ "models": {"conversation_object": "Conversation"},
+ "methods": {
+ "create": {"type": "http", "endpoint": "post /v1/conversations"},
+ "retrieve": "get /v1/conversations/{conversation_id}",
+ "update": {
+ "type": "http",
+ "endpoint": "post /v1/conversations/{conversation_id}",
+ },
+ "delete": {
+ "type": "http",
+ "endpoint": "delete /v1/conversations/{conversation_id}",
+ },
+ },
+ "subresources": {
+ "items": {
+ "methods": {
+ "get": {
+ "type": "http",
+ "endpoint": "get /v1/conversations/{conversation_id}/items/{item_id}",
+ },
+ "list": {
+ "type": "http",
+ "endpoint": "get /v1/conversations/{conversation_id}/items",
+ },
+ "create": {
+ "type": "http",
+ "endpoint": "post /v1/conversations/{conversation_id}/items",
+ },
+ "delete": {
+ "type": "http",
+ "endpoint": "delete /v1/conversations/{conversation_id}/items/{item_id}",
+ },
+ }
+ }
+ },
+ },
+ "inspect": {
+ "models": {
+ "healthInfo": "HealthInfo",
+ "providerInfo": "ProviderInfo",
+ "routeInfo": "RouteInfo",
+ "versionInfo": "VersionInfo",
+ },
+ "methods": {"health": "get /v1/health", "version": "get /v1/version"},
+ },
+ "embeddings": {
+ "models": {"create_embeddings_response": "OpenAIEmbeddingsResponse"},
+ "methods": {"create": "post /v1/embeddings"},
+ },
+ "chat": {
+ "models": {"chat_completion_chunk": "OpenAIChatCompletionChunk"},
+ "subresources": {
+ "completions": {
+ "methods": {
+ "create": {
+ "type": "http",
+ "streaming": {
+ "stream_event_model": "chat.chat_completion_chunk",
+ "param_discriminator": "stream",
+ },
+ "endpoint": "post /v1/chat/completions",
+ },
+ "list": {
+ "type": "http",
+ "paginated": False,
+ "endpoint": "get /v1/chat/completions",
+ },
+ "retrieve": {
+ "type": "http",
+ "endpoint": "get /v1/chat/completions/{completion_id}",
+ },
+ }
+ }
+ },
+ },
+ "completions": {
+ "methods": {
+ "create": {
+ "type": "http",
+ "streaming": {"param_discriminator": "stream"},
+ "endpoint": "post /v1/completions",
+ }
+ }
+ },
+ "vector_io": {
+ "models": {"queryChunksResponse": "QueryChunksResponse"},
+ "methods": {
+ "insert": "post /v1/vector-io/insert",
+ "query": "post /v1/vector-io/query",
+ },
+ },
+ "vector_stores": {
+ "models": {
+ "vector_store": "VectorStoreObject",
+ "list_vector_stores_response": "VectorStoreListResponse",
+ "vector_store_delete_response": "VectorStoreDeleteResponse",
+ "vector_store_search_response": "VectorStoreSearchResponsePage",
+ },
+ "methods": {
+ "create": "post /v1/vector_stores",
+ "list": "get /v1/vector_stores",
+ "retrieve": "get /v1/vector_stores/{vector_store_id}",
+ "update": "post /v1/vector_stores/{vector_store_id}",
+ "delete": "delete /v1/vector_stores/{vector_store_id}",
+ "search": "post /v1/vector_stores/{vector_store_id}/search",
+ },
+ "subresources": {
+ "files": {
+ "models": {"vector_store_file": "VectorStoreFileObject"},
+ "methods": {
+ "list": "get /v1/vector_stores/{vector_store_id}/files",
+ "retrieve": "get /v1/vector_stores/{vector_store_id}/files/{file_id}",
+ "update": "post /v1/vector_stores/{vector_store_id}/files/{file_id}",
+ "delete": "delete /v1/vector_stores/{vector_store_id}/files/{file_id}",
+ "create": "post /v1/vector_stores/{vector_store_id}/files",
+ "content": "get /v1/vector_stores/{vector_store_id}/files/{file_id}/content",
+ },
+ },
+ "file_batches": {
+ "models": {
+ "vector_store_file_batches": "VectorStoreFileBatchObject",
+ "list_vector_store_files_in_batch_response": "VectorStoreFilesListInBatchResponse",
+ },
+ "methods": {
+ "create": "post /v1/vector_stores/{vector_store_id}/file_batches",
+ "retrieve": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ "list_files": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ "cancel": "post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ },
+ },
+ },
+ },
+ "models": {
+ "models": {
+ "model": "OpenAIModel",
+ "list_models_response": "OpenAIListModelsResponse",
+ },
+ "methods": {
+ "list": {"paginated": False, "endpoint": "get /v1/models"},
+ "retrieve": "get /v1/models/{model_id}",
+ "register": "post /v1/models",
+ "unregister": "delete /v1/models/{model_id}",
+ },
+ "subresources": {"openai": {"methods": {"list": {"paginated": False, "endpoint": "get /v1/models"}}}},
+ },
+ "providers": {
+ "models": {"list_providers_response": "ListProvidersResponse"},
+ "methods": {
+ "list": {"paginated": False, "endpoint": "get /v1/providers"},
+ "retrieve": "get /v1/providers/{provider_id}",
+ },
+ },
+ "routes": {
+ "models": {"list_routes_response": "ListRoutesResponse"},
+ "methods": {"list": {"paginated": False, "endpoint": "get /v1/inspect/routes"}},
+ },
+ "moderations": {
+ "models": {"create_response": "ModerationObject"},
+ "methods": {"create": "post /v1/moderations"},
+ },
+ "safety": {
+ "models": {"run_shield_response": "RunShieldResponse"},
+ "methods": {"run_shield": "post /v1/safety/run-shield"},
+ },
+ "shields": {
+ "models": {"shield": "Shield", "list_shields_response": "ListShieldsResponse"},
+ "methods": {
+ "retrieve": "get /v1/shields/{identifier}",
+ "list": {"paginated": False, "endpoint": "get /v1/shields"},
+ "register": "post /v1/shields",
+ "delete": "delete /v1/shields/{identifier}",
+ },
+ },
+ "scoring": {
+ "methods": {
+ "score": "post /v1/scoring/score",
+ "score_batch": "post /v1/scoring/score-batch",
+ }
+ },
+ "scoring_functions": {
+ "models": {
+ "scoring_fn": "ScoringFn",
+ "scoring_fn_params": "ScoringFnParams",
+ "list_scoring_functions_response": "ListScoringFunctionsResponse",
+ },
+ "methods": {
+ "retrieve": "get /v1/scoring-functions/{scoring_fn_id}",
+ "list": {"paginated": False, "endpoint": "get /v1/scoring-functions"},
+ "register": "post /v1/scoring-functions",
+ "unregister": "delete /v1/scoring-functions/{scoring_fn_id}",
+ },
+ },
+ "files": {
+ "models": {
+ "file": "OpenAIFileObject",
+ "list_files_response": "ListOpenAIFileResponse",
+ "delete_file_response": "OpenAIFileDeleteResponse",
+ },
+ "methods": {
+ "create": "post /v1/files",
+ "list": "get /v1/files",
+ "retrieve": "get /v1/files/{file_id}",
+ "delete": "delete /v1/files/{file_id}",
+ "content": "get /v1/files/{file_id}/content",
+ },
+ },
+ "batches": {
+ "methods": {
+ "create": "post /v1/batches",
+ "list": "get /v1/batches",
+ "retrieve": "get /v1/batches/{batch_id}",
+ "cancel": "post /v1/batches/{batch_id}/cancel",
+ }
+ },
+ "alpha": {
+ "subresources": {
+ "inference": {"methods": {"rerank": "post /v1alpha/inference/rerank"}},
+ "post_training": {
+ "models": {
+ "algorithm_config": "AlgorithmConfig",
+ "post_training_job": "PostTrainingJob",
+ "list_post_training_jobs_response": "ListPostTrainingJobsResponse",
+ },
+ "methods": {
+ "preference_optimize": "post /v1alpha/post-training/preference-optimize",
+ "supervised_fine_tune": "post /v1alpha/post-training/supervised-fine-tune",
+ },
+ "subresources": {
+ "job": {
+ "methods": {
+ "artifacts": "get /v1alpha/post-training/job/artifacts",
+ "cancel": "post /v1alpha/post-training/job/cancel",
+ "status": "get /v1alpha/post-training/job/status",
+ "list": {
+ "paginated": False,
+ "endpoint": "get /v1alpha/post-training/jobs",
+ },
+ }
+ }
+ },
+ },
+ "benchmarks": {
+ "models": {
+ "benchmark": "Benchmark",
+ "list_benchmarks_response": "ListBenchmarksResponse",
+ },
+ "methods": {
+ "retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}",
+ "list": {
+ "paginated": False,
+ "endpoint": "get /v1alpha/eval/benchmarks",
+ },
+ "register": "post /v1alpha/eval/benchmarks",
+ "unregister": "delete /v1alpha/eval/benchmarks/{benchmark_id}",
+ },
+ },
+ "eval": {
+ "models": {
+ "evaluate_response": "EvaluateResponse",
+ "benchmark_config": "BenchmarkConfig",
+ "job": "Job",
+ },
+ "methods": {
+ "evaluate_rows": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+ "run_eval": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+ "evaluate_rows_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+ "run_eval_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+ },
+ "subresources": {
+ "jobs": {
+ "methods": {
+ "cancel": "delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+ "status": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+ "retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
+ }
+ }
+ },
+ },
+ }
+ },
+ "beta": {
+ "subresources": {
+ "datasets": {
+ "models": {"list_datasets_response": "ListDatasetsResponse"},
+ "methods": {
+ "register": "post /v1beta/datasets",
+ "retrieve": "get /v1beta/datasets/{dataset_id}",
+ "list": {"paginated": False, "endpoint": "get /v1beta/datasets"},
+ "unregister": "delete /v1beta/datasets/{dataset_id}",
+ "iterrows": "get /v1beta/datasetio/iterrows/{dataset_id}",
+ "appendrows": "post /v1beta/datasetio/append-rows/{dataset_id}",
+ },
+ }
+ }
+ },
+}
+
+
+HTTP_METHODS = {"get", "post", "put", "patch", "delete", "options", "head"}
+
+
+@dataclass
+class Endpoint:
+ method: str
+ path: str
+ extra: dict[str, Any] = field(default_factory=dict)
+
+ @classmethod
+ def from_config(cls, value: Any) -> Endpoint:
+ if isinstance(value, str):
+ method, _, path = value.partition(" ")
+ return cls._from_parts(method, path)
+ if isinstance(value, dict) and "endpoint" in value:
+ method, _, path = value["endpoint"].partition(" ")
+ extra = {k: v for k, v in value.items() if k != "endpoint"}
+ endpoint = cls._from_parts(method, path)
+ endpoint.extra.update(extra)
+ return endpoint
+ raise ValueError(f"Unsupported endpoint value: {value!r}")
+
+ @classmethod
+ def _from_parts(cls, method: str, path: str) -> Endpoint:
+ method = method.strip().lower()
+ path = path.strip()
+ if method not in HTTP_METHODS:
+ raise ValueError(f"Unsupported HTTP method for Stainless config: {method!r}")
+ if not path.startswith("/"):
+ raise ValueError(f"Endpoint path must start with '/': {path!r}")
+ return cls(method=method, path=path)
+
+ def to_config(self) -> Any:
+ if not self.extra:
+ return f"{self.method} {self.path}"
+ data = dict(self.extra)
+ data["endpoint"] = f"{self.method} {self.path}"
+ return data
+
+ def route_key(self) -> str:
+ return f"{self.method} {self.path}"
+
+
+@dataclass
+class Resource:
+ models: dict[str, str] | None = None
+ methods: dict[str, Endpoint] = field(default_factory=dict)
+ subresources: dict[str, Resource] = field(default_factory=dict)
+
+ @classmethod
+ def from_dict(cls, data: dict[str, Any]) -> Resource:
+ models = data.get("models")
+ methods = {name: Endpoint.from_config(value) for name, value in data.get("methods", {}).items()}
+ subresources = {name: cls.from_dict(value) for name, value in data.get("subresources", {}).items()}
+ return cls(models=models, methods=methods, subresources=subresources)
+
+ def to_config(self) -> dict[str, Any]:
+ result: dict[str, Any] = {}
+ if self.models:
+ result["models"] = self.models
+ if self.methods:
+ result["methods"] = {name: endpoint.to_config() for name, endpoint in self.methods.items()}
+ if self.subresources:
+ result["subresources"] = {name: resource.to_config() for name, resource in self.subresources.items()}
+ return result
+
+ def collect_endpoint_paths(self) -> set[str]:
+ paths = {endpoint.route_key() for endpoint in self.methods.values()}
+ for subresource in self.subresources.values():
+ paths.update(subresource.collect_endpoint_paths())
+ return paths
+
+ def iter_endpoints(self, prefix: str) -> Iterator[tuple[str, str]]:
+ for method_name, endpoint in self.methods.items():
+ label = f"{prefix}.{method_name}" if prefix else method_name
+ yield endpoint.route_key(), label
+ for sub_name, subresource in self.subresources.items():
+ sub_prefix = f"{prefix}.{sub_name}" if prefix else sub_name
+ yield from subresource.iter_endpoints(sub_prefix)
+
+
+_RESOURCES = {name: Resource.from_dict(data) for name, data in ALL_RESOURCES.items()}
+
+
+def _load_openapi_paths(openapi_path: Path) -> set[str]:
+ spec = yaml.safe_load(openapi_path.read_text()) or {}
+ paths: set[str] = set()
+ for path, path_item in (spec.get("paths") or {}).items():
+ if not isinstance(path_item, dict):
+ continue
+ for method, operation in path_item.items():
+ if not isinstance(operation, dict):
+ continue
+ paths.add(f"{str(method).lower()} {path}")
+ return paths
+
+
+@dataclass(frozen=True)
+class StainlessConfig:
+ organization: dict[str, Any]
+ security: list[Any]
+ security_schemes: dict[str, Any]
+ targets: dict[str, Any]
+ client_settings: dict[str, Any]
+ environments: dict[str, Any]
+ pagination: list[dict[str, Any]]
+ settings: dict[str, Any]
+ openapi: dict[str, Any]
+ readme: dict[str, Any]
+ resources: dict[str, Resource]
+
+ @classmethod
+ def make(cls) -> StainlessConfig:
+ return cls(
+ organization=ORGANIZATION,
+ security=SECURITY,
+ security_schemes=SECURITY_SCHEMES,
+ targets=TARGETS,
+ client_settings=CLIENT_SETTINGS,
+ environments=ENVIRONMENTS,
+ pagination=PAGINATION,
+ settings=SETTINGS,
+ openapi=OPENAPI,
+ readme=README,
+ resources=dict(_RESOURCES),
+ )
+
+ def referenced_paths(self) -> set[str]:
+ paths: set[str] = set()
+ for resource in self.resources.values():
+ paths.update(resource.collect_endpoint_paths())
+ paths.update(self.readme_endpoint_paths())
+ return paths
+
+ def readme_endpoint_paths(self) -> set[str]:
+ example_requests = self.readme.get("example_requests", {}) if self.readme else {}
+ paths: set[str] = set()
+ for entry in example_requests.values():
+ endpoint = entry.get("endpoint") if isinstance(entry, dict) else None
+ if isinstance(endpoint, str):
+ method, _, route = endpoint.partition(" ")
+ method = method.strip().lower()
+ route = route.strip()
+ if method and route:
+ paths.add(f"{method} {route}")
+ return paths
+
+ def endpoint_map(self) -> dict[str, list[str]]:
+ mapping: dict[str, list[str]] = {}
+ for resource_name, resource in self.resources.items():
+ for route, label in resource.iter_endpoints(resource_name):
+ mapping.setdefault(route, []).append(label)
+ return mapping
+
+ def validate_unique_endpoints(self) -> None:
+ duplicates: dict[str, list[str]] = {}
+ for route, labels in self.endpoint_map().items():
+ top_levels = {label.split(".", 1)[0] for label in labels}
+ if len(top_levels) > 1:
+ duplicates[route] = labels
+ if duplicates:
+ formatted = "\n".join(
+ f" - {route} defined in: {', '.join(sorted(labels))}" for route, labels in sorted(duplicates.items())
+ )
+ raise ValueError("Duplicate endpoints found across resources:\n" + formatted)
+
+ def validate_readme_endpoints(self) -> None:
+ resource_paths: set[str] = set()
+ for resource in self.resources.values():
+ resource_paths.update(resource.collect_endpoint_paths())
+ missing = sorted(path for path in self.readme_endpoint_paths() if path not in resource_paths)
+ if missing:
+ formatted = "\n".join(f" - {path}" for path in missing)
+ raise ValueError("README example endpoints are not present in Stainless resources:\n" + formatted)
+
+ def to_dict(self) -> dict[str, Any]:
+ cfg: dict[str, Any] = {}
+ for section in SECTION_ORDER:
+ if section == "resources":
+ cfg[section] = {name: resource.to_config() for name, resource in self.resources.items()}
+ continue
+ cfg[section] = getattr(self, section)
+ return cfg
+
+ def validate_against_openapi(self, openapi_path: Path) -> None:
+ if not openapi_path.exists():
+ raise FileNotFoundError(f"OpenAPI spec not found at {openapi_path}")
+ spec_paths = _load_openapi_paths(openapi_path)
+ config_paths = self.referenced_paths()
+ missing = sorted(path for path in config_paths if path not in spec_paths)
+ if missing:
+ formatted = "\n".join(f" - {path}" for path in missing)
+ raise ValueError("Stainless config references missing endpoints:\n" + formatted)
+
+ def validate(self, openapi_path: Path | None = None) -> None:
+ self.validate_unique_endpoints()
+ self.validate_readme_endpoints()
+ if openapi_path is not None:
+ self.validate_against_openapi(openapi_path)
+
+
+def build_config() -> dict[str, Any]:
+ return StainlessConfig.make().to_dict()
+
+
+def write_config(repo_root: Path, openapi_path: Path | None = None) -> Path:
+ stainless_config = StainlessConfig.make()
+ spec_path = (openapi_path or (repo_root / "client-sdks" / "stainless" / "openapi.yml")).resolve()
+ stainless_config.validate(spec_path)
+ yaml_text = yaml.safe_dump(stainless_config.to_dict(), sort_keys=False)
+ output = repo_root / "client-sdks" / "stainless" / "config.yml"
+ output.write_text(HEADER + yaml_text)
+ return output
+
+
+def main() -> None:
+ repo_root = Path(__file__).resolve().parents[3]
+ output = write_config(repo_root)
+ print(f"Wrote Stainless config: {output}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/provider_codegen.py b/scripts/provider_codegen.py
index d62d626ad..0eec46bc2 100755
--- a/scripts/provider_codegen.py
+++ b/scripts/provider_codegen.py
@@ -8,7 +8,8 @@
import subprocess
import sys
from pathlib import Path
-from typing import Any
+from types import UnionType
+from typing import Annotated, Any, Union, get_args, get_origin
from pydantic_core import PydanticUndefined
from rich.progress import Progress, SpinnerColumn, TextColumn
@@ -51,6 +52,41 @@ class ChangedPathTracker:
return self._changed_paths
+def extract_type_annotation(annotation: Any) -> str:
+ """Extract a type annotation into a clean, human-readable string representation."""
+ if annotation is None:
+ return "Any"
+
+ if annotation is type(None):
+ return "None"
+
+ origin = get_origin(annotation)
+ args = get_args(annotation)
+
+ # Annotated[T, FieldInfo(...)]: recurse into T so the FieldInfo metadata is dropped
+ if origin is Annotated and args:
+ return extract_type_annotation(args[0])
+
+ if origin in [Union, UnionType]:
+ non_none_args = [arg for arg in args if arg is not type(None)]
+ has_none = len(non_none_args) < len(args)
+
+ if len(non_none_args) == 1:
+ formatted = extract_type_annotation(non_none_args[0])
+ return f"{formatted} | None" if has_none else formatted
+ else:
+ formatted_args = [extract_type_annotation(arg) for arg in non_none_args]
+ result = " | ".join(formatted_args)
+ return f"{result} | None" if has_none else result
+
+ if origin is not None and args:
+ origin_name = getattr(origin, "__name__", str(origin))
+ formatted_args = [extract_type_annotation(arg) for arg in args]
+ return f"{origin_name}[{', '.join(formatted_args)}]"
+
+ return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
+
+
def get_config_class_info(config_class_path: str) -> dict[str, Any]:
"""Extract configuration information from a config class."""
try:
@@ -78,14 +114,8 @@ def get_config_class_info(config_class_path: str) -> dict[str, Any]:
for field_name, field in config_class.model_fields.items():
if getattr(field, "exclude", False):
continue
- field_type = str(field.annotation) if field.annotation else "Any"
- # this string replace is ridiculous
- field_type = field_type.replace("typing.", "").replace("Optional[", "").replace("]", "")
- field_type = field_type.replace("Annotated[", "").replace("FieldInfo(", "").replace(")", "")
- field_type = field_type.replace("llama_stack_api.inference.", "")
- field_type = field_type.replace("llama_stack.providers.", "")
- field_type = field_type.replace("llama_stack_api.datatypes.", "")
+ field_type = extract_type_annotation(field.annotation)
default_value = field.default
if field.default_factory is not None:
@@ -345,8 +375,16 @@ def generate_index_docs(api_name: str, api_docstring: str | None, provider_entri
# Add YAML frontmatter for index
md_lines.append("---")
if api_docstring:
- clean_desc = api_docstring.strip().replace('"', '\\"')
- md_lines.append(f'description: "{clean_desc}"')
+ # Handle multi-line descriptions in YAML
+ if "\n" in api_docstring.strip():
+ md_lines.append("description: |")
+ for line in api_docstring.strip().split("\n"):
+ # Avoid trailing whitespace by only adding spaces to non-empty lines
+ md_lines.append(f" {line}" if line.strip() else "")
+ else:
+ # Single-line description: escape double quotes and emit a quoted YAML scalar
+ clean_desc = api_docstring.strip().replace('"', '\\"')
+ md_lines.append(f'description: "{clean_desc}"')
md_lines.append(f"sidebar_label: {sidebar_label}")
md_lines.append(f"title: {api_name.title()}")
md_lines.append("---")
diff --git a/scripts/run_openapi_generator.sh b/scripts/run_openapi_generator.sh
index 946b2886f..d4e3b2ec7 100755
--- a/scripts/run_openapi_generator.sh
+++ b/scripts/run_openapi_generator.sh
@@ -17,3 +17,5 @@ PYTHONPATH=$PYTHONPATH:$stack_dir \
python3 -m scripts.openapi_generator "$stack_dir"/docs/static
cp "$stack_dir"/docs/static/stainless-llama-stack-spec.yaml "$stack_dir"/client-sdks/stainless/openapi.yml
+PYTHONPATH=$PYTHONPATH:$stack_dir \
+ python3 -m scripts.openapi_generator.stainless_config.generate_config
diff --git a/src/llama_stack/core/conversations/conversations.py b/src/llama_stack/core/conversations/conversations.py
index 4cf5a82ee..90402439b 100644
--- a/src/llama_stack/core/conversations/conversations.py
+++ b/src/llama_stack/core/conversations/conversations.py
@@ -11,10 +11,9 @@ from typing import Any, Literal
from pydantic import BaseModel, TypeAdapter
from llama_stack.core.datatypes import AccessRule, StackRunConfig
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
from llama_stack_api import (
Conversation,
ConversationDeletedResource,
@@ -25,6 +24,7 @@ from llama_stack_api import (
Conversations,
Metadata,
)
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
logger = get_logger(name=__name__, category="openai_conversations")
diff --git a/src/llama_stack/core/prompts/prompts.py b/src/llama_stack/core/prompts/prompts.py
index 9f532c1cd..ff67ad138 100644
--- a/src/llama_stack/core/prompts/prompts.py
+++ b/src/llama_stack/core/prompts/prompts.py
@@ -10,7 +10,7 @@ from typing import Any
from pydantic import BaseModel
from llama_stack.core.datatypes import StackRunConfig
-from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
+from llama_stack.core.storage.kvstore import KVStore, kvstore_impl
from llama_stack_api import ListPromptsResponse, Prompt, Prompts
diff --git a/src/llama_stack/core/server/quota.py b/src/llama_stack/core/server/quota.py
index 689f0e4c3..d74d3e89d 100644
--- a/src/llama_stack/core/server/quota.py
+++ b/src/llama_stack/core/server/quota.py
@@ -11,9 +11,9 @@ from datetime import UTC, datetime, timedelta
from starlette.types import ASGIApp, Receive, Scope, Send
from llama_stack.core.storage.datatypes import KVStoreReference, StorageBackendType
+from llama_stack.core.storage.kvstore.kvstore import _KVSTORE_BACKENDS, kvstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore.api import KVStore
-from llama_stack.providers.utils.kvstore.kvstore import _KVSTORE_BACKENDS, kvstore_impl
+from llama_stack_api.internal.kvstore import KVStore
logger = get_logger(name=__name__, category="core::server")
diff --git a/src/llama_stack/core/stack.py b/src/llama_stack/core/stack.py
index 00d990cb1..8ba1f2afd 100644
--- a/src/llama_stack/core/stack.py
+++ b/src/llama_stack/core/stack.py
@@ -385,8 +385,8 @@ def _initialize_storage(run_config: StackRunConfig):
else:
raise ValueError(f"Unknown storage backend type: {type}")
- from llama_stack.providers.utils.kvstore.kvstore import register_kvstore_backends
- from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+ from llama_stack.core.storage.kvstore.kvstore import register_kvstore_backends
+ from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
register_kvstore_backends(kv_backends)
register_sqlstore_backends(sql_backends)
diff --git a/src/llama_stack/core/storage/datatypes.py b/src/llama_stack/core/storage/datatypes.py
index 4b17b9ea9..527c1b828 100644
--- a/src/llama_stack/core/storage/datatypes.py
+++ b/src/llama_stack/core/storage/datatypes.py
@@ -12,6 +12,8 @@ from typing import Annotated, Literal
from pydantic import BaseModel, Field, field_validator
+from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR
+
class StorageBackendType(StrEnum):
KV_REDIS = "kv_redis"
@@ -256,15 +258,24 @@ class ResponsesStoreReference(InferenceStoreReference):
class ServerStoresConfig(BaseModel):
metadata: KVStoreReference | None = Field(
- default=None,
+ default=KVStoreReference(
+ backend="kv_default",
+ namespace="registry",
+ ),
description="Metadata store configuration (uses KV backend)",
)
inference: InferenceStoreReference | None = Field(
- default=None,
+ default=InferenceStoreReference(
+ backend="sql_default",
+ table_name="inference_store",
+ ),
description="Inference store configuration (uses SQL backend)",
)
conversations: SqlStoreReference | None = Field(
- default=None,
+ default=SqlStoreReference(
+ backend="sql_default",
+ table_name="openai_conversations",
+ ),
description="Conversations store configuration (uses SQL backend)",
)
responses: ResponsesStoreReference | None = Field(
@@ -272,13 +283,21 @@ class ServerStoresConfig(BaseModel):
description="Responses store configuration (uses SQL backend)",
)
prompts: KVStoreReference | None = Field(
- default=None,
+ default=KVStoreReference(backend="kv_default", namespace="prompts"),
description="Prompts store configuration (uses KV backend)",
)
class StorageConfig(BaseModel):
backends: dict[str, StorageBackendConfig] = Field(
+ default={
+ "kv_default": SqliteKVStoreConfig(
+ db_path=f"${{env.SQLITE_STORE_DIR:={DISTRIBS_BASE_DIR}}}/kvstore.db",
+ ),
+ "sql_default": SqliteSqlStoreConfig(
+ db_path=f"${{env.SQLITE_STORE_DIR:={DISTRIBS_BASE_DIR}}}/sql_store.db",
+ ),
+ },
description="Named backend configurations (e.g., 'default', 'cache')",
)
stores: ServerStoresConfig = Field(
diff --git a/src/llama_stack/core/storage/kvstore/__init__.py b/src/llama_stack/core/storage/kvstore/__init__.py
new file mode 100644
index 000000000..2d60f1508
--- /dev/null
+++ b/src/llama_stack/core/storage/kvstore/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack_api.internal.kvstore import KVStore as KVStore
+
+from .kvstore import * # noqa: F401, F403
diff --git a/src/llama_stack/providers/utils/kvstore/config.py b/src/llama_stack/core/storage/kvstore/config.py
similarity index 100%
rename from src/llama_stack/providers/utils/kvstore/config.py
rename to src/llama_stack/core/storage/kvstore/config.py
diff --git a/src/llama_stack/providers/utils/kvstore/kvstore.py b/src/llama_stack/core/storage/kvstore/kvstore.py
similarity index 82%
rename from src/llama_stack/providers/utils/kvstore/kvstore.py
rename to src/llama_stack/core/storage/kvstore/kvstore.py
index 5b8d77102..8ea9282fa 100644
--- a/src/llama_stack/providers/utils/kvstore/kvstore.py
+++ b/src/llama_stack/core/storage/kvstore/kvstore.py
@@ -13,11 +13,19 @@ from __future__ import annotations
import asyncio
from collections import defaultdict
+from datetime import datetime
+from typing import cast
-from llama_stack.core.storage.datatypes import KVStoreReference, StorageBackendConfig, StorageBackendType
+from llama_stack.core.storage.datatypes import KVStoreReference, StorageBackendConfig
+from llama_stack_api.internal.kvstore import KVStore
-from .api import KVStore
-from .config import KVStoreConfig
+from .config import (
+ KVStoreConfig,
+ MongoDBKVStoreConfig,
+ PostgresKVStoreConfig,
+ RedisKVStoreConfig,
+ SqliteKVStoreConfig,
+)
def kvstore_dependencies():
@@ -33,7 +41,7 @@ def kvstore_dependencies():
class InmemoryKVStoreImpl(KVStore):
def __init__(self):
- self._store = {}
+ self._store: dict[str, str] = {}
async def initialize(self) -> None:
pass
@@ -41,7 +49,7 @@ class InmemoryKVStoreImpl(KVStore):
async def get(self, key: str) -> str | None:
return self._store.get(key)
- async def set(self, key: str, value: str) -> None:
+ async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
self._store[key] = value
async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
@@ -70,7 +78,8 @@ def register_kvstore_backends(backends: dict[str, StorageBackendConfig]) -> None
_KVSTORE_INSTANCES.clear()
_KVSTORE_LOCKS.clear()
for name, cfg in backends.items():
- _KVSTORE_BACKENDS[name] = cfg
+ typed_cfg = cast(KVStoreConfig, cfg)
+ _KVSTORE_BACKENDS[name] = typed_cfg
async def kvstore_impl(reference: KVStoreReference) -> KVStore:
@@ -94,19 +103,20 @@ async def kvstore_impl(reference: KVStoreReference) -> KVStore:
config = backend_config.model_copy()
config.namespace = reference.namespace
- if config.type == StorageBackendType.KV_REDIS.value:
+ impl: KVStore
+ if isinstance(config, RedisKVStoreConfig):
from .redis import RedisKVStoreImpl
impl = RedisKVStoreImpl(config)
- elif config.type == StorageBackendType.KV_SQLITE.value:
+ elif isinstance(config, SqliteKVStoreConfig):
from .sqlite import SqliteKVStoreImpl
impl = SqliteKVStoreImpl(config)
- elif config.type == StorageBackendType.KV_POSTGRES.value:
+ elif isinstance(config, PostgresKVStoreConfig):
from .postgres import PostgresKVStoreImpl
impl = PostgresKVStoreImpl(config)
- elif config.type == StorageBackendType.KV_MONGODB.value:
+ elif isinstance(config, MongoDBKVStoreConfig):
from .mongodb import MongoDBKVStoreImpl
impl = MongoDBKVStoreImpl(config)
diff --git a/src/llama_stack/providers/utils/kvstore/mongodb/__init__.py b/src/llama_stack/core/storage/kvstore/mongodb/__init__.py
similarity index 100%
rename from src/llama_stack/providers/utils/kvstore/mongodb/__init__.py
rename to src/llama_stack/core/storage/kvstore/mongodb/__init__.py
diff --git a/src/llama_stack/providers/utils/kvstore/mongodb/mongodb.py b/src/llama_stack/core/storage/kvstore/mongodb/mongodb.py
similarity index 98%
rename from src/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
rename to src/llama_stack/core/storage/kvstore/mongodb/mongodb.py
index 964c45090..673d6038f 100644
--- a/src/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
+++ b/src/llama_stack/core/storage/kvstore/mongodb/mongodb.py
@@ -9,8 +9,8 @@ from datetime import datetime
from pymongo import AsyncMongoClient
from pymongo.asynchronous.collection import AsyncCollection
+from llama_stack.core.storage.kvstore import KVStore
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import KVStore
from ..config import MongoDBKVStoreConfig
diff --git a/src/llama_stack/providers/utils/kvstore/postgres/__init__.py b/src/llama_stack/core/storage/kvstore/postgres/__init__.py
similarity index 100%
rename from src/llama_stack/providers/utils/kvstore/postgres/__init__.py
rename to src/llama_stack/core/storage/kvstore/postgres/__init__.py
diff --git a/src/llama_stack/providers/utils/kvstore/postgres/postgres.py b/src/llama_stack/core/storage/kvstore/postgres/postgres.py
similarity index 73%
rename from src/llama_stack/providers/utils/kvstore/postgres/postgres.py
rename to src/llama_stack/core/storage/kvstore/postgres/postgres.py
index 56d6dbb48..39c3fd2e2 100644
--- a/src/llama_stack/providers/utils/kvstore/postgres/postgres.py
+++ b/src/llama_stack/core/storage/kvstore/postgres/postgres.py
@@ -6,12 +6,13 @@
from datetime import datetime
-import psycopg2
-from psycopg2.extras import DictCursor
+import psycopg2 # type: ignore[import-not-found]
+from psycopg2.extensions import connection as PGConnection # type: ignore[import-not-found]
+from psycopg2.extras import DictCursor # type: ignore[import-not-found]
from llama_stack.log import get_logger
+from llama_stack_api.internal.kvstore import KVStore
-from ..api import KVStore
from ..config import PostgresKVStoreConfig
log = get_logger(name=__name__, category="providers::utils")
@@ -20,12 +21,12 @@ log = get_logger(name=__name__, category="providers::utils")
class PostgresKVStoreImpl(KVStore):
def __init__(self, config: PostgresKVStoreConfig):
self.config = config
- self.conn = None
- self.cursor = None
+ self._conn: PGConnection | None = None
+ self._cursor: DictCursor | None = None
async def initialize(self) -> None:
try:
- self.conn = psycopg2.connect(
+ self._conn = psycopg2.connect(
host=self.config.host,
port=self.config.port,
database=self.config.db,
@@ -34,11 +35,11 @@ class PostgresKVStoreImpl(KVStore):
sslmode=self.config.ssl_mode,
sslrootcert=self.config.ca_cert_path,
)
- self.conn.autocommit = True
- self.cursor = self.conn.cursor(cursor_factory=DictCursor)
+ self._conn.autocommit = True
+ self._cursor = self._conn.cursor(cursor_factory=DictCursor)
# Create table if it doesn't exist
- self.cursor.execute(
+ self._cursor.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.config.table_name} (
key TEXT PRIMARY KEY,
@@ -51,6 +52,11 @@ class PostgresKVStoreImpl(KVStore):
log.exception("Could not connect to PostgreSQL database server")
raise RuntimeError("Could not connect to PostgreSQL database server") from e
+ def _cursor_or_raise(self) -> DictCursor:
+ if self._cursor is None:
+ raise RuntimeError("Postgres client not initialized")
+ return self._cursor
+
def _namespaced_key(self, key: str) -> str:
if not self.config.namespace:
return key
@@ -58,7 +64,8 @@ class PostgresKVStoreImpl(KVStore):
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
key = self._namespaced_key(key)
- self.cursor.execute(
+ cursor = self._cursor_or_raise()
+ cursor.execute(
f"""
INSERT INTO {self.config.table_name} (key, value, expiration)
VALUES (%s, %s, %s)
@@ -70,7 +77,8 @@ class PostgresKVStoreImpl(KVStore):
async def get(self, key: str) -> str | None:
key = self._namespaced_key(key)
- self.cursor.execute(
+ cursor = self._cursor_or_raise()
+ cursor.execute(
f"""
SELECT value FROM {self.config.table_name}
WHERE key = %s
@@ -78,12 +86,13 @@ class PostgresKVStoreImpl(KVStore):
""",
(key,),
)
- result = self.cursor.fetchone()
+ result = cursor.fetchone()
return result[0] if result else None
async def delete(self, key: str) -> None:
key = self._namespaced_key(key)
- self.cursor.execute(
+ cursor = self._cursor_or_raise()
+ cursor.execute(
f"DELETE FROM {self.config.table_name} WHERE key = %s",
(key,),
)
@@ -92,7 +101,8 @@ class PostgresKVStoreImpl(KVStore):
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
- self.cursor.execute(
+ cursor = self._cursor_or_raise()
+ cursor.execute(
f"""
SELECT value FROM {self.config.table_name}
WHERE key >= %s AND key < %s
@@ -101,14 +111,15 @@ class PostgresKVStoreImpl(KVStore):
""",
(start_key, end_key),
)
- return [row[0] for row in self.cursor.fetchall()]
+ return [row[0] for row in cursor.fetchall()]
async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
- self.cursor.execute(
+ cursor = self._cursor_or_raise()
+ cursor.execute(
f"SELECT key FROM {self.config.table_name} WHERE key >= %s AND key < %s",
(start_key, end_key),
)
- return [row[0] for row in self.cursor.fetchall()]
+ return [row[0] for row in cursor.fetchall()]
diff --git a/src/llama_stack/providers/utils/kvstore/redis/__init__.py b/src/llama_stack/core/storage/kvstore/redis/__init__.py
similarity index 100%
rename from src/llama_stack/providers/utils/kvstore/redis/__init__.py
rename to src/llama_stack/core/storage/kvstore/redis/__init__.py
diff --git a/src/llama_stack/providers/utils/kvstore/redis/redis.py b/src/llama_stack/core/storage/kvstore/redis/redis.py
similarity index 54%
rename from src/llama_stack/providers/utils/kvstore/redis/redis.py
rename to src/llama_stack/core/storage/kvstore/redis/redis.py
index 3d2d956c3..2b35a22e1 100644
--- a/src/llama_stack/providers/utils/kvstore/redis/redis.py
+++ b/src/llama_stack/core/storage/kvstore/redis/redis.py
@@ -6,18 +6,25 @@
from datetime import datetime
-from redis.asyncio import Redis
+from redis.asyncio import Redis # type: ignore[import-not-found]
+
+from llama_stack_api.internal.kvstore import KVStore
-from ..api import KVStore
from ..config import RedisKVStoreConfig
class RedisKVStoreImpl(KVStore):
def __init__(self, config: RedisKVStoreConfig):
self.config = config
+ self._redis: Redis | None = None
async def initialize(self) -> None:
- self.redis = Redis.from_url(self.config.url)
+ self._redis = Redis.from_url(self.config.url)
+
+ def _client(self) -> Redis:
+ if self._redis is None:
+ raise RuntimeError("Redis client not initialized")
+ return self._redis
def _namespaced_key(self, key: str) -> str:
if not self.config.namespace:
@@ -26,30 +33,37 @@ class RedisKVStoreImpl(KVStore):
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
key = self._namespaced_key(key)
- await self.redis.set(key, value)
+ client = self._client()
+ await client.set(key, value)
if expiration:
- await self.redis.expireat(key, expiration)
+ await client.expireat(key, expiration)
async def get(self, key: str) -> str | None:
key = self._namespaced_key(key)
- value = await self.redis.get(key)
+ client = self._client()
+ value = await client.get(key)
if value is None:
return None
- await self.redis.ttl(key)
- return value
+ await client.ttl(key)
+ if isinstance(value, bytes):
+ return value.decode("utf-8")
+ if isinstance(value, str):
+ return value
+ return str(value)
async def delete(self, key: str) -> None:
key = self._namespaced_key(key)
- await self.redis.delete(key)
+ await self._client().delete(key)
async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
+ client = self._client()
cursor = 0
pattern = start_key + "*" # Match all keys starting with start_key prefix
- matching_keys = []
+ matching_keys: list[str | bytes] = []
while True:
- cursor, keys = await self.redis.scan(cursor, match=pattern, count=1000)
+ cursor, keys = await client.scan(cursor, match=pattern, count=1000)
for key in keys:
key_str = key.decode("utf-8") if isinstance(key, bytes) else key
@@ -61,7 +75,7 @@ class RedisKVStoreImpl(KVStore):
# Then fetch all values in a single MGET call
if matching_keys:
- values = await self.redis.mget(matching_keys)
+ values = await client.mget(matching_keys)
return [
value.decode("utf-8") if isinstance(value, bytes) else value for value in values if value is not None
]
@@ -70,7 +84,18 @@ class RedisKVStoreImpl(KVStore):
async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
"""Get all keys in the given range."""
- matching_keys = await self.redis.zrangebylex(self.namespace, f"[{start_key}", f"[{end_key}")
- if not matching_keys:
- return []
- return [k.decode("utf-8") for k in matching_keys]
+ start_key = self._namespaced_key(start_key)
+ end_key = self._namespaced_key(end_key)
+ client = self._client()
+ cursor = 0
+ pattern = start_key + "*"
+ result: list[str] = []
+ while True:
+ cursor, keys = await client.scan(cursor, match=pattern, count=1000)
+ for key in keys:
+ key_str = key.decode("utf-8") if isinstance(key, bytes) else str(key)
+ if start_key <= key_str <= end_key:
+ result.append(key_str)
+ if cursor == 0:
+ break
+ return result
diff --git a/src/llama_stack/providers/utils/kvstore/sqlite/__init__.py b/src/llama_stack/core/storage/kvstore/sqlite/__init__.py
similarity index 100%
rename from src/llama_stack/providers/utils/kvstore/sqlite/__init__.py
rename to src/llama_stack/core/storage/kvstore/sqlite/__init__.py
diff --git a/src/llama_stack/providers/utils/kvstore/sqlite/sqlite.py b/src/llama_stack/core/storage/kvstore/sqlite/sqlite.py
similarity index 99%
rename from src/llama_stack/providers/utils/kvstore/sqlite/sqlite.py
rename to src/llama_stack/core/storage/kvstore/sqlite/sqlite.py
index a9a7a1304..22cf8ac49 100644
--- a/src/llama_stack/providers/utils/kvstore/sqlite/sqlite.py
+++ b/src/llama_stack/core/storage/kvstore/sqlite/sqlite.py
@@ -10,8 +10,8 @@ from datetime import datetime
import aiosqlite
from llama_stack.log import get_logger
+from llama_stack_api.internal.kvstore import KVStore
-from ..api import KVStore
from ..config import SqliteKVStoreConfig
logger = get_logger(name=__name__, category="providers::utils")
diff --git a/src/llama_stack/core/storage/sqlstore/__init__.py b/src/llama_stack/core/storage/sqlstore/__init__.py
new file mode 100644
index 000000000..eb843e4ba
--- /dev/null
+++ b/src/llama_stack/core/storage/sqlstore/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack_api.internal.sqlstore import (
+ ColumnDefinition as ColumnDefinition,
+)
+from llama_stack_api.internal.sqlstore import (
+ ColumnType as ColumnType,
+)
+from llama_stack_api.internal.sqlstore import (
+ SqlStore as SqlStore,
+)
+
+from .sqlstore import * # noqa: F401,F403
diff --git a/src/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py b/src/llama_stack/core/storage/sqlstore/authorized_sqlstore.py
similarity index 99%
rename from src/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py
rename to src/llama_stack/core/storage/sqlstore/authorized_sqlstore.py
index ba95dd120..e6cdcc543 100644
--- a/src/llama_stack/providers/utils/sqlstore/authorized_sqlstore.py
+++ b/src/llama_stack/core/storage/sqlstore/authorized_sqlstore.py
@@ -14,8 +14,8 @@ from llama_stack.core.datatypes import User
from llama_stack.core.request_headers import get_authenticated_user
from llama_stack.core.storage.datatypes import StorageBackendType
from llama_stack.log import get_logger
-
-from .api import ColumnDefinition, ColumnType, PaginatedResponse, SqlStore
+from llama_stack_api import PaginatedResponse
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore
logger = get_logger(name=__name__, category="providers::utils")
diff --git a/src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py b/src/llama_stack/core/storage/sqlstore/sqlalchemy_sqlstore.py
similarity index 99%
rename from src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
rename to src/llama_stack/core/storage/sqlstore/sqlalchemy_sqlstore.py
index 10009d396..01c561443 100644
--- a/src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
+++ b/src/llama_stack/core/storage/sqlstore/sqlalchemy_sqlstore.py
@@ -29,8 +29,7 @@ from sqlalchemy.sql.elements import ColumnElement
from llama_stack.core.storage.datatypes import SqlAlchemySqlStoreConfig
from llama_stack.log import get_logger
from llama_stack_api import PaginatedResponse
-
-from .api import ColumnDefinition, ColumnType, SqlStore
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore
logger = get_logger(name=__name__, category="providers::utils")
diff --git a/src/llama_stack/providers/utils/sqlstore/sqlstore.py b/src/llama_stack/core/storage/sqlstore/sqlstore.py
similarity index 98%
rename from src/llama_stack/providers/utils/sqlstore/sqlstore.py
rename to src/llama_stack/core/storage/sqlstore/sqlstore.py
index 9409b7d00..fb2c9d279 100644
--- a/src/llama_stack/providers/utils/sqlstore/sqlstore.py
+++ b/src/llama_stack/core/storage/sqlstore/sqlstore.py
@@ -16,8 +16,7 @@ from llama_stack.core.storage.datatypes import (
StorageBackendConfig,
StorageBackendType,
)
-
-from .api import SqlStore
+from llama_stack_api.internal.sqlstore import SqlStore
sql_store_pip_packages = ["sqlalchemy[asyncio]", "aiosqlite", "asyncpg"]
diff --git a/src/llama_stack/core/store/registry.py b/src/llama_stack/core/store/registry.py
index 6ff9e575b..7144a94f7 100644
--- a/src/llama_stack/core/store/registry.py
+++ b/src/llama_stack/core/store/registry.py
@@ -12,8 +12,8 @@ import pydantic
from llama_stack.core.datatypes import RoutableObjectWithProvider
from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack.core.storage.kvstore import KVStore, kvstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
logger = get_logger(__name__, category="core::registry")
diff --git a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml
index 5384b58fe..7721138c7 100644
--- a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml
+++ b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml
@@ -17,44 +17,43 @@ providers:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras
config:
- base_url: https://api.cerebras.ai
+ base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama
config:
- url: ${env.OLLAMA_URL:=http://localhost:11434}
+ base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
- provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi
config:
- url: ${env.TGI_URL:=}
+ base_url: ${env.TGI_URL:=}
- provider_id: fireworks
provider_type: remote::fireworks
config:
- url: https://api.fireworks.ai/inference/v1
+ base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock
provider_type: remote::bedrock
config:
- api_key: ${env.AWS_BEDROCK_API_KEY:=}
+ api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai
provider_type: remote::openai
config:
@@ -76,18 +75,18 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova
provider_type: remote::sambanova
config:
- url: https://api.sambanova.ai/v1
+ base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure
config:
api_key: ${env.AZURE_API_KEY:=}
- api_base: ${env.AZURE_API_BASE:=}
+ base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers
diff --git a/src/llama_stack/distributions/ci-tests/run.yaml b/src/llama_stack/distributions/ci-tests/run.yaml
index 1118d2ad1..b791e1488 100644
--- a/src/llama_stack/distributions/ci-tests/run.yaml
+++ b/src/llama_stack/distributions/ci-tests/run.yaml
@@ -17,44 +17,43 @@ providers:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras
config:
- base_url: https://api.cerebras.ai
+ base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama
config:
- url: ${env.OLLAMA_URL:=http://localhost:11434}
+ base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
- provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi
config:
- url: ${env.TGI_URL:=}
+ base_url: ${env.TGI_URL:=}
- provider_id: fireworks
provider_type: remote::fireworks
config:
- url: https://api.fireworks.ai/inference/v1
+ base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock
provider_type: remote::bedrock
config:
- api_key: ${env.AWS_BEDROCK_API_KEY:=}
+ api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai
provider_type: remote::openai
config:
@@ -76,18 +75,18 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova
provider_type: remote::sambanova
config:
- url: https://api.sambanova.ai/v1
+ base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure
config:
api_key: ${env.AZURE_API_KEY:=}
- api_base: ${env.AZURE_API_BASE:=}
+ base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers
diff --git a/src/llama_stack/distributions/nvidia/run-with-safety.yaml b/src/llama_stack/distributions/nvidia/run-with-safety.yaml
index 1d57ad17a..d2c7dd090 100644
--- a/src/llama_stack/distributions/nvidia/run-with-safety.yaml
+++ b/src/llama_stack/distributions/nvidia/run-with-safety.yaml
@@ -16,9 +16,8 @@ providers:
- provider_id: nvidia
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: nvidia
provider_type: remote::nvidia
config:
diff --git a/src/llama_stack/distributions/nvidia/run.yaml b/src/llama_stack/distributions/nvidia/run.yaml
index 8c50b8bfb..c267587c7 100644
--- a/src/llama_stack/distributions/nvidia/run.yaml
+++ b/src/llama_stack/distributions/nvidia/run.yaml
@@ -16,9 +16,8 @@ providers:
- provider_id: nvidia
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
vector_io:
- provider_id: faiss
provider_type: inline::faiss
diff --git a/src/llama_stack/distributions/open-benchmark/run.yaml b/src/llama_stack/distributions/open-benchmark/run.yaml
index 912e48dd3..7ebc58841 100644
--- a/src/llama_stack/distributions/open-benchmark/run.yaml
+++ b/src/llama_stack/distributions/open-benchmark/run.yaml
@@ -27,12 +27,12 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
vector_io:
- provider_id: sqlite-vec
diff --git a/src/llama_stack/distributions/postgres-demo/run.yaml b/src/llama_stack/distributions/postgres-demo/run.yaml
index dd1c2bc7f..049f519cd 100644
--- a/src/llama_stack/distributions/postgres-demo/run.yaml
+++ b/src/llama_stack/distributions/postgres-demo/run.yaml
@@ -11,7 +11,7 @@ providers:
- provider_id: vllm-inference
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=http://localhost:8000/v1}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
diff --git a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml
index e29ada6f4..9c250c05a 100644
--- a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml
+++ b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml
@@ -17,44 +17,43 @@ providers:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras
config:
- base_url: https://api.cerebras.ai
+ base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama
config:
- url: ${env.OLLAMA_URL:=http://localhost:11434}
+ base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
- provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi
config:
- url: ${env.TGI_URL:=}
+ base_url: ${env.TGI_URL:=}
- provider_id: fireworks
provider_type: remote::fireworks
config:
- url: https://api.fireworks.ai/inference/v1
+ base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock
provider_type: remote::bedrock
config:
- api_key: ${env.AWS_BEDROCK_API_KEY:=}
+ api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai
provider_type: remote::openai
config:
@@ -76,18 +75,18 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova
provider_type: remote::sambanova
config:
- url: https://api.sambanova.ai/v1
+ base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure
config:
api_key: ${env.AZURE_API_KEY:=}
- api_base: ${env.AZURE_API_BASE:=}
+ base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers
diff --git a/src/llama_stack/distributions/starter-gpu/run.yaml b/src/llama_stack/distributions/starter-gpu/run.yaml
index 7149b8659..65f9ae326 100644
--- a/src/llama_stack/distributions/starter-gpu/run.yaml
+++ b/src/llama_stack/distributions/starter-gpu/run.yaml
@@ -17,44 +17,43 @@ providers:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras
config:
- base_url: https://api.cerebras.ai
+ base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama
config:
- url: ${env.OLLAMA_URL:=http://localhost:11434}
+ base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
- provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi
config:
- url: ${env.TGI_URL:=}
+ base_url: ${env.TGI_URL:=}
- provider_id: fireworks
provider_type: remote::fireworks
config:
- url: https://api.fireworks.ai/inference/v1
+ base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock
provider_type: remote::bedrock
config:
- api_key: ${env.AWS_BEDROCK_API_KEY:=}
+ api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai
provider_type: remote::openai
config:
@@ -76,18 +75,18 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova
provider_type: remote::sambanova
config:
- url: https://api.sambanova.ai/v1
+ base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure
config:
api_key: ${env.AZURE_API_KEY:=}
- api_base: ${env.AZURE_API_BASE:=}
+ base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers
diff --git a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml
index 437674bf9..3314bb9e9 100644
--- a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml
+++ b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml
@@ -17,44 +17,43 @@ providers:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras
config:
- base_url: https://api.cerebras.ai
+ base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama
config:
- url: ${env.OLLAMA_URL:=http://localhost:11434}
+ base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
- provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi
config:
- url: ${env.TGI_URL:=}
+ base_url: ${env.TGI_URL:=}
- provider_id: fireworks
provider_type: remote::fireworks
config:
- url: https://api.fireworks.ai/inference/v1
+ base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock
provider_type: remote::bedrock
config:
- api_key: ${env.AWS_BEDROCK_API_KEY:=}
+ api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai
provider_type: remote::openai
config:
@@ -76,18 +75,18 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova
provider_type: remote::sambanova
config:
- url: https://api.sambanova.ai/v1
+ base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure
config:
api_key: ${env.AZURE_API_KEY:=}
- api_base: ${env.AZURE_API_BASE:=}
+ base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers
diff --git a/src/llama_stack/distributions/starter/run.yaml b/src/llama_stack/distributions/starter/run.yaml
index 0ce392810..e88539e6a 100644
--- a/src/llama_stack/distributions/starter/run.yaml
+++ b/src/llama_stack/distributions/starter/run.yaml
@@ -17,44 +17,43 @@ providers:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras
config:
- base_url: https://api.cerebras.ai
+ base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama
config:
- url: ${env.OLLAMA_URL:=http://localhost:11434}
+ base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1}
- provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm
config:
- url: ${env.VLLM_URL:=}
+ base_url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi
config:
- url: ${env.TGI_URL:=}
+ base_url: ${env.TGI_URL:=}
- provider_id: fireworks
provider_type: remote::fireworks
config:
- url: https://api.fireworks.ai/inference/v1
+ base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together
provider_type: remote::together
config:
- url: https://api.together.xyz/v1
+ base_url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock
provider_type: remote::bedrock
config:
- api_key: ${env.AWS_BEDROCK_API_KEY:=}
+ api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia
config:
- url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
+ base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
- append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai
provider_type: remote::openai
config:
@@ -76,18 +75,18 @@ providers:
- provider_id: groq
provider_type: remote::groq
config:
- url: https://api.groq.com
+ base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova
provider_type: remote::sambanova
config:
- url: https://api.sambanova.ai/v1
+ base_url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure
config:
api_key: ${env.AZURE_API_KEY:=}
- api_base: ${env.AZURE_API_BASE:=}
+ base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers
diff --git a/src/llama_stack/distributions/starter/starter.py b/src/llama_stack/distributions/starter/starter.py
index 4c21a8c99..32264eebb 100644
--- a/src/llama_stack/distributions/starter/starter.py
+++ b/src/llama_stack/distributions/starter/starter.py
@@ -17,6 +17,8 @@ from llama_stack.core.datatypes import (
ToolGroupInput,
VectorStoresConfig,
)
+from llama_stack.core.storage.kvstore.config import PostgresKVStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import PostgresSqlStoreConfig
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.distributions.template import DistributionTemplate, RunConfigSettings
from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig
@@ -35,8 +37,6 @@ from llama_stack.providers.remote.vector_io.pgvector.config import (
)
from llama_stack.providers.remote.vector_io.qdrant.config import QdrantVectorIOConfig
from llama_stack.providers.remote.vector_io.weaviate.config import WeaviateVectorIOConfig
-from llama_stack.providers.utils.kvstore.config import PostgresKVStoreConfig
-from llama_stack.providers.utils.sqlstore.sqlstore import PostgresSqlStoreConfig
from llama_stack_api import RemoteProviderSpec
diff --git a/src/llama_stack/distributions/template.py b/src/llama_stack/distributions/template.py
index 5755a26de..90b458805 100644
--- a/src/llama_stack/distributions/template.py
+++ b/src/llama_stack/distributions/template.py
@@ -35,13 +35,13 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageBackendType,
)
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.config import get_pip_packages as get_kv_pip_packages
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import get_pip_packages as get_sql_pip_packages
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.config import get_pip_packages as get_kv_pip_packages
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
-from llama_stack.providers.utils.sqlstore.sqlstore import get_pip_packages as get_sql_pip_packages
from llama_stack_api import DatasetPurpose, ModelType
diff --git a/src/llama_stack/distributions/watsonx/run.yaml b/src/llama_stack/distributions/watsonx/run.yaml
index 8456115d2..f8c489fe3 100644
--- a/src/llama_stack/distributions/watsonx/run.yaml
+++ b/src/llama_stack/distributions/watsonx/run.yaml
@@ -15,7 +15,7 @@ providers:
- provider_id: watsonx
provider_type: remote::watsonx
config:
- url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}
+ base_url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}
api_key: ${env.WATSONX_API_KEY:=}
project_id: ${env.WATSONX_PROJECT_ID:=}
vector_io:
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/__init__.py b/src/llama_stack/providers/inline/agents/meta_reference/__init__.py
index 91287617a..9683baf00 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/__init__.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/__init__.py
@@ -23,12 +23,14 @@ async def get_provider_impl(
config,
deps[Api.inference],
deps[Api.vector_io],
- deps[Api.safety],
+ deps.get(Api.safety),
deps[Api.tool_runtime],
deps[Api.tool_groups],
deps[Api.conversations],
- policy,
+ deps[Api.prompts],
+ deps[Api.files],
telemetry_enabled,
+ policy,
)
await impl.initialize()
return impl
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/agents.py b/src/llama_stack/providers/inline/agents/meta_reference/agents.py
index 347f6fdb1..ca419a51a 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/agents.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/agents.py
@@ -6,12 +6,13 @@
from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.storage.kvstore import InmemoryKVStoreImpl, kvstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_impl
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from llama_stack_api import (
Agents,
Conversations,
+ Files,
Inference,
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
@@ -22,6 +23,7 @@ from llama_stack_api import (
OpenAIResponsePrompt,
OpenAIResponseText,
Order,
+ Prompts,
ResponseGuardrail,
Safety,
ToolGroups,
@@ -41,10 +43,12 @@ class MetaReferenceAgentsImpl(Agents):
config: MetaReferenceAgentsImplConfig,
inference_api: Inference,
vector_io_api: VectorIO,
- safety_api: Safety,
+ safety_api: Safety | None,
tool_runtime_api: ToolRuntime,
tool_groups_api: ToolGroups,
conversations_api: Conversations,
+ prompts_api: Prompts,
+ files_api: Files,
policy: list[AccessRule],
telemetry_enabled: bool = False,
):
@@ -56,7 +60,8 @@ class MetaReferenceAgentsImpl(Agents):
self.tool_groups_api = tool_groups_api
self.conversations_api = conversations_api
self.telemetry_enabled = telemetry_enabled
-
+ self.prompts_api = prompts_api
+ self.files_api = files_api
self.in_memory_store = InmemoryKVStoreImpl()
self.openai_responses_impl: OpenAIResponsesImpl | None = None
self.policy = policy
@@ -73,6 +78,8 @@ class MetaReferenceAgentsImpl(Agents):
vector_io_api=self.vector_io_api,
safety_api=self.safety_api,
conversations_api=self.conversations_api,
+ prompts_api=self.prompts_api,
+ files_api=self.files_api,
)
async def shutdown(self) -> None:
@@ -92,6 +99,7 @@ class MetaReferenceAgentsImpl(Agents):
model: str,
prompt: OpenAIResponsePrompt | None = None,
instructions: str | None = None,
+ parallel_tool_calls: bool | None = True,
previous_response_id: str | None = None,
conversation: str | None = None,
store: bool | None = True,
@@ -120,6 +128,7 @@ class MetaReferenceAgentsImpl(Agents):
include,
max_infer_iters,
guardrails,
+ parallel_tool_calls,
max_tool_calls,
)
return result # type: ignore[no-any-return]
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
index cb0fe284e..c8282df69 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
@@ -4,6 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+import re
import time
import uuid
from collections.abc import AsyncIterator
@@ -18,13 +19,17 @@ from llama_stack.providers.utils.responses.responses_store import (
from llama_stack_api import (
ConversationItem,
Conversations,
+ Files,
Inference,
InvalidConversationIdError,
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
+ OpenAIChatCompletionContentPartParam,
OpenAIDeleteResponseObject,
OpenAIMessageParam,
OpenAIResponseInput,
+ OpenAIResponseInputMessageContentFile,
+ OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseMessage,
@@ -34,7 +39,9 @@ from llama_stack_api import (
OpenAIResponseText,
OpenAIResponseTextFormat,
OpenAISystemMessageParam,
+ OpenAIUserMessageParam,
Order,
+ Prompts,
ResponseGuardrailSpec,
Safety,
ToolGroups,
@@ -46,6 +53,7 @@ from .streaming import StreamingResponseOrchestrator
from .tool_executor import ToolExecutor
from .types import ChatCompletionContext, ToolContext
from .utils import (
+ convert_response_content_to_chat_content,
convert_response_input_to_chat_messages,
convert_response_text_to_chat_response_format,
extract_guardrail_ids,
@@ -67,8 +75,10 @@ class OpenAIResponsesImpl:
tool_runtime_api: ToolRuntime,
responses_store: ResponsesStore,
vector_io_api: VectorIO, # VectorIO
- safety_api: Safety,
+ safety_api: Safety | None,
conversations_api: Conversations,
+ prompts_api: Prompts,
+ files_api: Files,
):
self.inference_api = inference_api
self.tool_groups_api = tool_groups_api
@@ -82,6 +92,8 @@ class OpenAIResponsesImpl:
tool_runtime_api=tool_runtime_api,
vector_io_api=vector_io_api,
)
+ self.prompts_api = prompts_api
+ self.files_api = files_api
async def _prepend_previous_response(
self,
@@ -122,11 +134,13 @@ class OpenAIResponsesImpl:
# Use stored messages directly and convert only new input
message_adapter = TypeAdapter(list[OpenAIMessageParam])
messages = message_adapter.validate_python(previous_response.messages)
- new_messages = await convert_response_input_to_chat_messages(input, previous_messages=messages)
+ new_messages = await convert_response_input_to_chat_messages(
+ input, previous_messages=messages, files_api=self.files_api
+ )
messages.extend(new_messages)
else:
# Backward compatibility: reconstruct from inputs
- messages = await convert_response_input_to_chat_messages(all_input)
+ messages = await convert_response_input_to_chat_messages(all_input, files_api=self.files_api)
tool_context.recover_tools_from_previous_response(previous_response)
elif conversation is not None:
@@ -138,7 +152,7 @@ class OpenAIResponsesImpl:
all_input = input
if not conversation_items.data:
# First turn - just convert the new input
- messages = await convert_response_input_to_chat_messages(input)
+ messages = await convert_response_input_to_chat_messages(input, files_api=self.files_api)
else:
if not stored_messages:
all_input = conversation_items.data
@@ -154,14 +168,82 @@ class OpenAIResponsesImpl:
all_input = input
messages = stored_messages or []
- new_messages = await convert_response_input_to_chat_messages(all_input, previous_messages=messages)
+ new_messages = await convert_response_input_to_chat_messages(
+ all_input, previous_messages=messages, files_api=self.files_api
+ )
messages.extend(new_messages)
else:
all_input = input
- messages = await convert_response_input_to_chat_messages(all_input)
+ messages = await convert_response_input_to_chat_messages(all_input, files_api=self.files_api)
return all_input, messages, tool_context
+ async def _prepend_prompt(
+ self,
+ messages: list[OpenAIMessageParam],
+ openai_response_prompt: OpenAIResponsePrompt | None,
+ ) -> None:
+ """Prepend prompt template to messages, resolving text/image/file variables.
+
+ :param messages: List of OpenAIMessageParam objects
+ :param openai_response_prompt: (Optional) OpenAIResponsePrompt object with variables
+ :returns: string of utf-8 characters
+ """
+ if not openai_response_prompt or not openai_response_prompt.id:
+ return
+
+ prompt_version = int(openai_response_prompt.version) if openai_response_prompt.version else None
+ cur_prompt = await self.prompts_api.get_prompt(openai_response_prompt.id, prompt_version)
+
+ if not cur_prompt or not cur_prompt.prompt:
+ return
+
+ cur_prompt_text = cur_prompt.prompt
+ cur_prompt_variables = cur_prompt.variables
+
+ if not openai_response_prompt.variables:
+ messages.insert(0, OpenAISystemMessageParam(content=cur_prompt_text))
+ return
+
+ # Validate that all provided variables exist in the prompt
+ for name in openai_response_prompt.variables.keys():
+ if name not in cur_prompt_variables:
+ raise ValueError(f"Variable {name} not found in prompt {openai_response_prompt.id}")
+
+ # Separate text and media variables
+ text_substitutions = {}
+ media_content_parts: list[OpenAIChatCompletionContentPartParam] = []
+
+ for name, value in openai_response_prompt.variables.items():
+ # Text variable found
+ if isinstance(value, OpenAIResponseInputMessageContentText):
+ text_substitutions[name] = value.text
+
+ # Media variable found
+ elif isinstance(value, OpenAIResponseInputMessageContentImage | OpenAIResponseInputMessageContentFile):
+ converted_parts = await convert_response_content_to_chat_content([value], files_api=self.files_api)
+ if isinstance(converted_parts, list):
+ media_content_parts.extend(converted_parts)
+
+ # Eg: {{product_photo}} becomes "[Image: product_photo]"
+ # This gives the model textual context about what media exists in the prompt
+ var_type = value.type.replace("input_", "").replace("_", " ").title()
+ text_substitutions[name] = f"[{var_type}: {name}]"
+
+ def replace_variable(match: re.Match[str]) -> str:
+ var_name = match.group(1).strip()
+ return str(text_substitutions.get(var_name, match.group(0)))
+
+ pattern = r"\{\{\s*(\w+)\s*\}\}"
+ processed_prompt_text = re.sub(pattern, replace_variable, cur_prompt_text)
+
+ # Insert system message with resolved text
+ messages.insert(0, OpenAISystemMessageParam(content=processed_prompt_text))
+
+ # If we have media, create a new user message because allows to ingest images and files
+ if media_content_parts:
+ messages.append(OpenAIUserMessageParam(content=media_content_parts))
+
async def get_openai_response(
self,
response_id: str,
@@ -252,6 +334,7 @@ class OpenAIResponsesImpl:
include: list[str] | None = None,
max_infer_iters: int | None = 10,
guardrails: list[str | ResponseGuardrailSpec] | None = None,
+ parallel_tool_calls: bool | None = None,
max_tool_calls: int | None = None,
):
stream = bool(stream)
@@ -272,6 +355,14 @@ class OpenAIResponsesImpl:
guardrail_ids = extract_guardrail_ids(guardrails) if guardrails else []
+ # Validate that Safety API is available if guardrails are requested
+ if guardrail_ids and self.safety_api is None:
+ raise ValueError(
+ "Cannot process guardrails: Safety API is not configured.\n\n"
+ "To use guardrails, ensure the Safety API is configured in your stack, or remove "
+ "the 'guardrails' parameter from your request."
+ )
+
if conversation is not None:
if previous_response_id is not None:
raise ValueError(
@@ -288,6 +379,7 @@ class OpenAIResponsesImpl:
input=input,
conversation=conversation,
model=model,
+ prompt=prompt,
instructions=instructions,
previous_response_id=previous_response_id,
store=store,
@@ -296,6 +388,7 @@ class OpenAIResponsesImpl:
tools=tools,
max_infer_iters=max_infer_iters,
guardrail_ids=guardrail_ids,
+ parallel_tool_calls=parallel_tool_calls,
max_tool_calls=max_tool_calls,
)
@@ -340,12 +433,14 @@ class OpenAIResponsesImpl:
instructions: str | None = None,
previous_response_id: str | None = None,
conversation: str | None = None,
+ prompt: OpenAIResponsePrompt | None = None,
store: bool | None = True,
temperature: float | None = None,
text: OpenAIResponseText | None = None,
tools: list[OpenAIResponseInputTool] | None = None,
max_infer_iters: int | None = 10,
guardrail_ids: list[str] | None = None,
+ parallel_tool_calls: bool | None = True,
max_tool_calls: int | None = None,
) -> AsyncIterator[OpenAIResponseObjectStream]:
# These should never be None when called from create_openai_response (which sets defaults)
@@ -361,6 +456,9 @@ class OpenAIResponsesImpl:
if instructions:
messages.insert(0, OpenAISystemMessageParam(content=instructions))
+ # Prepend reusable prompt (if provided)
+ await self._prepend_prompt(messages, prompt)
+
# Structured outputs
response_format = await convert_response_text_to_chat_response_format(text)
@@ -383,8 +481,10 @@ class OpenAIResponsesImpl:
ctx=ctx,
response_id=response_id,
created_at=created_at,
+ prompt=prompt,
text=text,
max_infer_iters=max_infer_iters,
+ parallel_tool_calls=parallel_tool_calls,
tool_executor=self.tool_executor,
safety_api=self.safety_api,
guardrail_ids=guardrail_ids,
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
index 95c690147..9e901d88b 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
@@ -66,6 +66,8 @@ from llama_stack_api import (
OpenAIResponseUsage,
OpenAIResponseUsageInputTokensDetails,
OpenAIResponseUsageOutputTokensDetails,
+ OpenAIToolMessageParam,
+ Safety,
WebSearchToolTypes,
)
@@ -111,9 +113,10 @@ class StreamingResponseOrchestrator:
max_infer_iters: int,
tool_executor, # Will be the tool execution logic from the main class
instructions: str | None,
- safety_api,
+ safety_api: Safety | None,
guardrail_ids: list[str] | None = None,
prompt: OpenAIResponsePrompt | None = None,
+ parallel_tool_calls: bool | None = None,
max_tool_calls: int | None = None,
):
self.inference_api = inference_api
@@ -128,6 +131,8 @@ class StreamingResponseOrchestrator:
self.prompt = prompt
# System message that is inserted into the model's context
self.instructions = instructions
+ # Whether to allow more than one function tool call generated per turn.
+ self.parallel_tool_calls = parallel_tool_calls
# Max number of total calls to built-in tools that can be processed in a response
self.max_tool_calls = max_tool_calls
self.sequence_number = 0
@@ -190,6 +195,7 @@ class StreamingResponseOrchestrator:
usage=self.accumulated_usage,
instructions=self.instructions,
prompt=self.prompt,
+ parallel_tool_calls=self.parallel_tool_calls,
max_tool_calls=self.max_tool_calls,
)
@@ -901,10 +907,16 @@ class StreamingResponseOrchestrator:
"""Coordinate execution of both function and non-function tool calls."""
# Execute non-function tool calls
for tool_call in non_function_tool_calls:
- # Check if total calls made to built-in and mcp tools exceed max_tool_calls
+ # if total calls made to built-in and mcp tools exceed max_tool_calls
+ # then create a tool response message indicating the call was skipped
if self.max_tool_calls is not None and self.accumulated_builtin_tool_calls >= self.max_tool_calls:
logger.info(f"Ignoring built-in and mcp tool call since reached the limit of {self.max_tool_calls=}.")
- break
+ skipped_call_message = OpenAIToolMessageParam(
+ content=f"Tool call skipped: maximum tool calls limit ({self.max_tool_calls}) reached.",
+ tool_call_id=tool_call.id,
+ )
+ next_turn_messages.append(skipped_call_message)
+ continue
# Find the item_id for this tool call
matching_item_id = None
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py
index 943bbae41..7bbf6bd30 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py
@@ -5,11 +5,14 @@
# the root directory of this source tree.
import asyncio
+import base64
+import mimetypes
import re
import uuid
from collections.abc import Sequence
from llama_stack_api import (
+ Files,
OpenAIAssistantMessageParam,
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartParam,
@@ -18,6 +21,8 @@ from llama_stack_api import (
OpenAIChatCompletionToolCallFunction,
OpenAIChoice,
OpenAIDeveloperMessageParam,
+ OpenAIFile,
+ OpenAIFileFile,
OpenAIImageURL,
OpenAIJSONSchema,
OpenAIMessageParam,
@@ -29,6 +34,7 @@ from llama_stack_api import (
OpenAIResponseInput,
OpenAIResponseInputFunctionToolCallOutput,
OpenAIResponseInputMessageContent,
+ OpenAIResponseInputMessageContentFile,
OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
@@ -37,9 +43,11 @@ from llama_stack_api import (
OpenAIResponseMessage,
OpenAIResponseOutputMessageContent,
OpenAIResponseOutputMessageContentOutputText,
+ OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageMCPListTools,
+ OpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponseText,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
@@ -49,6 +57,46 @@ from llama_stack_api import (
)
+async def extract_bytes_from_file(file_id: str, files_api: Files) -> bytes:
+ """
+ Extract raw bytes from file using the Files API.
+
+ :param file_id: The file identifier (e.g., "file-abc123")
+ :param files_api: Files API instance
+ :returns: Raw file content as bytes
+ :raises: ValueError if file cannot be retrieved
+ """
+ try:
+ response = await files_api.openai_retrieve_file_content(file_id)
+ return bytes(response.body)
+ except Exception as e:
+ raise ValueError(f"Failed to retrieve file content for file_id '{file_id}': {str(e)}") from e
+
+
+def generate_base64_ascii_text_from_bytes(raw_bytes: bytes) -> str:
+ """
+ Converts raw binary bytes into a safe ASCII text representation for URLs
+
+ :param raw_bytes: the actual bytes that represents file content
+ :returns: string of utf-8 characters
+ """
+ return base64.b64encode(raw_bytes).decode("utf-8")
+
+
+def construct_data_url(ascii_text: str, mime_type: str | None) -> str:
+ """
+ Construct a data URL embedding the base64-encoded content
+
+ :param ascii_text: ASCII content
+ :param mime_type: MIME type of file
+ :returns: data url string (eg. data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...)
+ """
+ if not mime_type:
+ mime_type = "application/octet-stream"
+
+ return f"data:{mime_type};base64,{ascii_text}"
+
+
async def convert_chat_choice_to_response_message(
choice: OpenAIChoice,
citation_files: dict[str, str] | None = None,
@@ -78,11 +126,15 @@ async def convert_chat_choice_to_response_message(
async def convert_response_content_to_chat_content(
content: str | Sequence[OpenAIResponseInputMessageContent | OpenAIResponseOutputMessageContent],
+ files_api: Files | None,
) -> str | list[OpenAIChatCompletionContentPartParam]:
"""
Convert the content parts from an OpenAI Response API request into OpenAI Chat Completion content parts.
The content schemas of each API look similar, but are not exactly the same.
+
+ :param content: The content to convert
+ :param files_api: Files API for resolving file_id to raw file content (required if content contains files/images)
"""
if isinstance(content, str):
return content
@@ -95,9 +147,68 @@ async def convert_response_content_to_chat_content(
elif isinstance(content_part, OpenAIResponseOutputMessageContentOutputText):
converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part.text))
elif isinstance(content_part, OpenAIResponseInputMessageContentImage):
+ detail = content_part.detail
+ image_mime_type = None
if content_part.image_url:
- image_url = OpenAIImageURL(url=content_part.image_url, detail=content_part.detail)
+ image_url = OpenAIImageURL(url=content_part.image_url, detail=detail)
converted_parts.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url))
+ elif content_part.file_id:
+ if files_api is None:
+ raise ValueError("file_ids are not supported by this implementation of the Stack")
+ image_file_response = await files_api.openai_retrieve_file(content_part.file_id)
+ if image_file_response.filename:
+ image_mime_type, _ = mimetypes.guess_type(image_file_response.filename)
+ raw_image_bytes = await extract_bytes_from_file(content_part.file_id, files_api)
+ ascii_text = generate_base64_ascii_text_from_bytes(raw_image_bytes)
+ image_data_url = construct_data_url(ascii_text, image_mime_type)
+ image_url = OpenAIImageURL(url=image_data_url, detail=detail)
+ converted_parts.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url))
+ else:
+ raise ValueError(
+ f"Image content must have either 'image_url' or 'file_id'. "
+ f"Got image_url={content_part.image_url}, file_id={content_part.file_id}"
+ )
+ elif isinstance(content_part, OpenAIResponseInputMessageContentFile):
+ resolved_file_data = None
+ file_data = content_part.file_data
+ file_id = content_part.file_id
+ file_url = content_part.file_url
+ filename = content_part.filename
+ file_mime_type = None
+ if not any([file_data, file_id, file_url]):
+ raise ValueError(
+ f"File content must have at least one of 'file_data', 'file_id', or 'file_url'. "
+ f"Got file_data={file_data}, file_id={file_id}, file_url={file_url}"
+ )
+ if file_id:
+ if files_api is None:
+ raise ValueError("file_ids are not supported by this implementation of the Stack")
+
+ file_response = await files_api.openai_retrieve_file(file_id)
+ if not filename:
+ filename = file_response.filename
+ file_mime_type, _ = mimetypes.guess_type(file_response.filename)
+ raw_file_bytes = await extract_bytes_from_file(file_id, files_api)
+ ascii_text = generate_base64_ascii_text_from_bytes(raw_file_bytes)
+ resolved_file_data = construct_data_url(ascii_text, file_mime_type)
+ elif file_data:
+ if file_data.startswith("data:"):
+ resolved_file_data = file_data
+ else:
+ # Raw base64 data, wrap in data URL format
+ if filename:
+ file_mime_type, _ = mimetypes.guess_type(filename)
+ resolved_file_data = construct_data_url(file_data, file_mime_type)
+ elif file_url:
+ resolved_file_data = file_url
+ converted_parts.append(
+ OpenAIFile(
+ file=OpenAIFileFile(
+ file_data=resolved_file_data,
+ filename=filename,
+ )
+ )
+ )
elif isinstance(content_part, str):
converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part))
else:
@@ -110,12 +221,14 @@ async def convert_response_content_to_chat_content(
async def convert_response_input_to_chat_messages(
input: str | list[OpenAIResponseInput],
previous_messages: list[OpenAIMessageParam] | None = None,
+ files_api: Files | None = None,
) -> list[OpenAIMessageParam]:
"""
Convert the input from an OpenAI Response API request into OpenAI Chat Completion messages.
:param input: The input to convert
:param previous_messages: Optional previous messages to check for function_call references
+ :param files_api: Files API for resolving file_id to raw file content (optional, required for file/image content)
"""
messages: list[OpenAIMessageParam] = []
if isinstance(input, list):
@@ -169,6 +282,12 @@ async def convert_response_input_to_chat_messages(
elif isinstance(input_item, OpenAIResponseOutputMessageMCPListTools):
# the tool list will be handled separately
pass
+ elif isinstance(
+ input_item,
+ OpenAIResponseOutputMessageWebSearchToolCall | OpenAIResponseOutputMessageFileSearchToolCall,
+ ):
+ # these tool calls are tracked internally but not converted to chat messages
+ pass
elif isinstance(input_item, OpenAIResponseMCPApprovalRequest) or isinstance(
input_item, OpenAIResponseMCPApprovalResponse
):
@@ -176,7 +295,7 @@ async def convert_response_input_to_chat_messages(
pass
elif isinstance(input_item, OpenAIResponseMessage):
# Narrow type to OpenAIResponseMessage which has content and role attributes
- content = await convert_response_content_to_chat_content(input_item.content)
+ content = await convert_response_content_to_chat_content(input_item.content, files_api)
message_type = await get_message_type_by_role(input_item.role)
if message_type is None:
raise ValueError(
@@ -320,11 +439,15 @@ def is_function_tool_call(
return False
-async def run_guardrails(safety_api: Safety, messages: str, guardrail_ids: list[str]) -> str | None:
+async def run_guardrails(safety_api: Safety | None, messages: str, guardrail_ids: list[str]) -> str | None:
"""Run guardrails against messages and return violation message if blocked."""
if not messages:
return None
+ # If safety API is not available, skip guardrails
+ if safety_api is None:
+ return None
+
# Look up shields to get their provider_resource_id (actual model ID)
model_ids = []
# TODO: list_shields not in Safety interface but available at runtime via API routing
diff --git a/src/llama_stack/providers/inline/batches/reference/__init__.py b/src/llama_stack/providers/inline/batches/reference/__init__.py
index 11c4b06a9..b48c82864 100644
--- a/src/llama_stack/providers/inline/batches/reference/__init__.py
+++ b/src/llama_stack/providers/inline/batches/reference/__init__.py
@@ -7,7 +7,7 @@
from typing import Any
from llama_stack.core.datatypes import AccessRule, Api
-from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack_api import Files, Inference, Models
from .batches import ReferenceBatchesImpl
diff --git a/src/llama_stack/providers/inline/batches/reference/batches.py b/src/llama_stack/providers/inline/batches/reference/batches.py
index b0169a412..57ef939d3 100644
--- a/src/llama_stack/providers/inline/batches/reference/batches.py
+++ b/src/llama_stack/providers/inline/batches/reference/batches.py
@@ -16,8 +16,8 @@ from typing import Any
from openai.types.batch import BatchError, Errors
from pydantic import BaseModel
+from llama_stack.core.storage.kvstore import KVStore
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import KVStore
from llama_stack_api import (
Batches,
BatchObject,
diff --git a/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py b/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py
index 6ab1a540f..85c7cff3e 100644
--- a/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py
+++ b/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
from typing import Any
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
-from llama_stack.providers.utils.kvstore import kvstore_impl
from llama_stack.providers.utils.pagination import paginate_records
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
diff --git a/src/llama_stack/providers/inline/eval/meta_reference/eval.py b/src/llama_stack/providers/inline/eval/meta_reference/eval.py
index d43e569e2..0f0cb84d6 100644
--- a/src/llama_stack/providers/inline/eval/meta_reference/eval.py
+++ b/src/llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -8,8 +8,8 @@ from typing import Any
from tqdm import tqdm
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.common.data_schema_validator import ColumnName
-from llama_stack.providers.utils.kvstore import kvstore_impl
from llama_stack_api import (
Agents,
Benchmark,
diff --git a/src/llama_stack/providers/inline/files/localfs/files.py b/src/llama_stack/providers/inline/files/localfs/files.py
index 5fb35a378..2afe2fe5e 100644
--- a/src/llama_stack/providers/inline/files/localfs/files.py
+++ b/src/llama_stack/providers/inline/files/localfs/files.py
@@ -13,11 +13,10 @@ from fastapi import Depends, File, Form, Response, UploadFile
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.id_generation import generate_object_id
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
from llama_stack_api import (
ExpiresAfter,
Files,
@@ -28,6 +27,7 @@ from llama_stack_api import (
Order,
ResourceNotFoundError,
)
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
from .config import LocalfsFilesImplConfig
diff --git a/src/llama_stack/providers/inline/vector_io/faiss/faiss.py b/src/llama_stack/providers/inline/vector_io/faiss/faiss.py
index d52a54e6a..91a17058b 100644
--- a/src/llama_stack/providers/inline/vector_io/faiss/faiss.py
+++ b/src/llama_stack/providers/inline/vector_io/faiss/faiss.py
@@ -14,9 +14,8 @@ import faiss # type: ignore[import-untyped]
import numpy as np
from numpy.typing import NDArray
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack_api import (
@@ -32,6 +31,7 @@ from llama_stack_api import (
VectorStoreNotFoundError,
VectorStoresProtocolPrivate,
)
+from llama_stack_api.internal.kvstore import KVStore
from .config import FaissVectorIOConfig
diff --git a/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py b/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
index 74bc349a5..a384a33dc 100644
--- a/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
+++ b/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
@@ -14,9 +14,8 @@ import numpy as np
import sqlite_vec # type: ignore[import-untyped]
from numpy.typing import NDArray
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
RERANKER_TYPE_RRF,
@@ -35,6 +34,7 @@ from llama_stack_api import (
VectorStoreNotFoundError,
VectorStoresProtocolPrivate,
)
+from llama_stack_api.internal.kvstore import KVStore
logger = get_logger(name=__name__, category="vector_io")
diff --git a/src/llama_stack/providers/registry/agents.py b/src/llama_stack/providers/registry/agents.py
index 455be1ae7..22bb45faf 100644
--- a/src/llama_stack/providers/registry/agents.py
+++ b/src/llama_stack/providers/registry/agents.py
@@ -5,7 +5,7 @@
# the root directory of this source tree.
-from llama_stack.providers.utils.kvstore import kvstore_dependencies
+from llama_stack.core.storage.kvstore import kvstore_dependencies
from llama_stack_api import (
Api,
InlineProviderSpec,
@@ -30,11 +30,15 @@ def available_providers() -> list[ProviderSpec]:
config_class="llama_stack.providers.inline.agents.meta_reference.MetaReferenceAgentsImplConfig",
api_dependencies=[
Api.inference,
- Api.safety,
Api.vector_io,
Api.tool_runtime,
Api.tool_groups,
Api.conversations,
+ Api.prompts,
+ Api.files,
+ ],
+ optional_api_dependencies=[
+ Api.safety,
],
description="Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks.",
),
diff --git a/src/llama_stack/providers/registry/files.py b/src/llama_stack/providers/registry/files.py
index 024254b57..8ce8acd91 100644
--- a/src/llama_stack/providers/registry/files.py
+++ b/src/llama_stack/providers/registry/files.py
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from llama_stack.providers.utils.sqlstore.sqlstore import sql_store_pip_packages
+from llama_stack.core.storage.sqlstore.sqlstore import sql_store_pip_packages
from llama_stack_api import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec
diff --git a/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py b/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
index 72069f716..26390a63b 100644
--- a/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
+++ b/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
@@ -6,7 +6,7 @@
from typing import Any
from urllib.parse import parse_qs, urlparse
-from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.pagination import paginate_records
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
diff --git a/src/llama_stack/providers/remote/files/openai/files.py b/src/llama_stack/providers/remote/files/openai/files.py
index d2f5a08eb..2cfd44168 100644
--- a/src/llama_stack/providers/remote/files/openai/files.py
+++ b/src/llama_stack/providers/remote/files/openai/files.py
@@ -10,10 +10,9 @@ from typing import Annotated, Any
from fastapi import Depends, File, Form, Response, UploadFile
from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
from llama_stack_api import (
ExpiresAfter,
Files,
@@ -24,6 +23,7 @@ from llama_stack_api import (
Order,
ResourceNotFoundError,
)
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
from openai import OpenAI
from .config import OpenAIFilesImplConfig
diff --git a/src/llama_stack/providers/remote/files/s3/files.py b/src/llama_stack/providers/remote/files/s3/files.py
index 68822eb77..3c1c82fa0 100644
--- a/src/llama_stack/providers/remote/files/s3/files.py
+++ b/src/llama_stack/providers/remote/files/s3/files.py
@@ -19,10 +19,9 @@ if TYPE_CHECKING:
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.id_generation import generate_object_id
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
from llama_stack_api import (
ExpiresAfter,
Files,
@@ -33,6 +32,7 @@ from llama_stack_api import (
Order,
ResourceNotFoundError,
)
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
from .config import S3FilesImplConfig
diff --git a/src/llama_stack/providers/remote/inference/azure/azure.py b/src/llama_stack/providers/remote/inference/azure/azure.py
index 134d01b15..c977d75d5 100644
--- a/src/llama_stack/providers/remote/inference/azure/azure.py
+++ b/src/llama_stack/providers/remote/inference/azure/azure.py
@@ -4,8 +4,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from urllib.parse import urljoin
-
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
from .config import AzureConfig
@@ -22,4 +20,4 @@ class AzureInferenceAdapter(OpenAIMixin):
Returns the Azure API base URL from the configuration.
"""
- return urljoin(str(self.config.api_base), "/openai/v1")
+ return str(self.config.base_url)
diff --git a/src/llama_stack/providers/remote/inference/azure/config.py b/src/llama_stack/providers/remote/inference/azure/config.py
index b801b91b2..f6407a183 100644
--- a/src/llama_stack/providers/remote/inference/azure/config.py
+++ b/src/llama_stack/providers/remote/inference/azure/config.py
@@ -32,8 +32,9 @@ class AzureProviderDataValidator(BaseModel):
@json_schema_type
class AzureConfig(RemoteInferenceProviderConfig):
- api_base: HttpUrl = Field(
- description="Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com)",
+ base_url: HttpUrl | None = Field(
+ default=None,
+ description="Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com/openai/v1)",
)
api_version: str | None = Field(
default_factory=lambda: os.getenv("AZURE_API_VERSION"),
@@ -48,14 +49,14 @@ class AzureConfig(RemoteInferenceProviderConfig):
def sample_run_config(
cls,
api_key: str = "${env.AZURE_API_KEY:=}",
- api_base: str = "${env.AZURE_API_BASE:=}",
+ base_url: str = "${env.AZURE_API_BASE:=}",
api_version: str = "${env.AZURE_API_VERSION:=}",
api_type: str = "${env.AZURE_API_TYPE:=}",
**kwargs,
) -> dict[str, Any]:
return {
"api_key": api_key,
- "api_base": api_base,
+ "base_url": base_url,
"api_version": api_version,
"api_type": api_type,
}
diff --git a/src/llama_stack/providers/remote/inference/bedrock/bedrock.py b/src/llama_stack/providers/remote/inference/bedrock/bedrock.py
index 70ee95916..451549db8 100644
--- a/src/llama_stack/providers/remote/inference/bedrock/bedrock.py
+++ b/src/llama_stack/providers/remote/inference/bedrock/bedrock.py
@@ -37,7 +37,7 @@ class BedrockInferenceAdapter(OpenAIMixin):
"""
config: BedrockConfig
- provider_data_api_key_field: str = "aws_bedrock_api_key"
+ provider_data_api_key_field: str = "aws_bearer_token_bedrock"
def get_base_url(self) -> str:
"""Get base URL for OpenAI client."""
@@ -111,7 +111,7 @@ class BedrockInferenceAdapter(OpenAIMixin):
logger.error(f"AWS Bedrock authentication token expired: {error_msg}")
raise ValueError(
"AWS Bedrock authentication failed: Bearer token has expired. "
- "The AWS_BEDROCK_API_KEY environment variable contains an expired pre-signed URL. "
+ "The AWS_BEARER_TOKEN_BEDROCK environment variable contains an expired pre-signed URL. "
"Please refresh your token by generating a new pre-signed URL with AWS credentials. "
"Refer to AWS Bedrock documentation for details on OpenAI-compatible endpoints."
) from e
diff --git a/src/llama_stack/providers/remote/inference/bedrock/config.py b/src/llama_stack/providers/remote/inference/bedrock/config.py
index 631a6e7ef..f31db63aa 100644
--- a/src/llama_stack/providers/remote/inference/bedrock/config.py
+++ b/src/llama_stack/providers/remote/inference/bedrock/config.py
@@ -12,9 +12,9 @@ from llama_stack.providers.utils.inference.model_registry import RemoteInference
class BedrockProviderDataValidator(BaseModel):
- aws_bedrock_api_key: str | None = Field(
+ aws_bearer_token_bedrock: str | None = Field(
default=None,
- description="API key for Amazon Bedrock",
+ description="API Key (Bearer token) for Amazon Bedrock",
)
@@ -27,6 +27,6 @@ class BedrockConfig(RemoteInferenceProviderConfig):
@classmethod
def sample_run_config(cls, **kwargs):
return {
- "api_key": "${env.AWS_BEDROCK_API_KEY:=}",
+ "api_key": "${env.AWS_BEARER_TOKEN_BEDROCK:=}",
"region_name": "${env.AWS_DEFAULT_REGION:=us-east-2}",
}
diff --git a/src/llama_stack/providers/remote/inference/cerebras/cerebras.py b/src/llama_stack/providers/remote/inference/cerebras/cerebras.py
index 680431e22..23c27df1e 100644
--- a/src/llama_stack/providers/remote/inference/cerebras/cerebras.py
+++ b/src/llama_stack/providers/remote/inference/cerebras/cerebras.py
@@ -4,8 +4,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from urllib.parse import urljoin
-
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
from llama_stack_api import (
OpenAIEmbeddingsRequestWithExtraBody,
@@ -21,7 +19,7 @@ class CerebrasInferenceAdapter(OpenAIMixin):
provider_data_api_key_field: str = "cerebras_api_key"
def get_base_url(self) -> str:
- return urljoin(self.config.base_url, "v1")
+ return str(self.config.base_url)
async def openai_embeddings(
self,
diff --git a/src/llama_stack/providers/remote/inference/cerebras/config.py b/src/llama_stack/providers/remote/inference/cerebras/config.py
index db357fd1c..ea88abbea 100644
--- a/src/llama_stack/providers/remote/inference/cerebras/config.py
+++ b/src/llama_stack/providers/remote/inference/cerebras/config.py
@@ -7,12 +7,12 @@
import os
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
-DEFAULT_BASE_URL = "https://api.cerebras.ai"
+DEFAULT_BASE_URL = "https://api.cerebras.ai/v1"
class CerebrasProviderDataValidator(BaseModel):
@@ -24,8 +24,8 @@ class CerebrasProviderDataValidator(BaseModel):
@json_schema_type
class CerebrasImplConfig(RemoteInferenceProviderConfig):
- base_url: str = Field(
- default=os.environ.get("CEREBRAS_BASE_URL", DEFAULT_BASE_URL),
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl(os.environ.get("CEREBRAS_BASE_URL", DEFAULT_BASE_URL)),
description="Base URL for the Cerebras API",
)
diff --git a/src/llama_stack/providers/remote/inference/databricks/config.py b/src/llama_stack/providers/remote/inference/databricks/config.py
index bd409fa13..44cb862f9 100644
--- a/src/llama_stack/providers/remote/inference/databricks/config.py
+++ b/src/llama_stack/providers/remote/inference/databricks/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import BaseModel, Field, SecretStr
+from pydantic import BaseModel, Field, HttpUrl, SecretStr
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -21,9 +21,9 @@ class DatabricksProviderDataValidator(BaseModel):
@json_schema_type
class DatabricksImplConfig(RemoteInferenceProviderConfig):
- url: str | None = Field(
+ base_url: HttpUrl | None = Field(
default=None,
- description="The URL for the Databricks model serving endpoint",
+ description="The URL for the Databricks model serving endpoint (should include /serving-endpoints path)",
)
auth_credential: SecretStr | None = Field(
default=None,
@@ -34,11 +34,11 @@ class DatabricksImplConfig(RemoteInferenceProviderConfig):
@classmethod
def sample_run_config(
cls,
- url: str = "${env.DATABRICKS_HOST:=}",
+ base_url: str = "${env.DATABRICKS_HOST:=}",
api_token: str = "${env.DATABRICKS_TOKEN:=}",
**kwargs: Any,
) -> dict[str, Any]:
return {
- "url": url,
+ "base_url": base_url,
"api_token": api_token,
}
diff --git a/src/llama_stack/providers/remote/inference/databricks/databricks.py b/src/llama_stack/providers/remote/inference/databricks/databricks.py
index c07d97b67..f2f8832f6 100644
--- a/src/llama_stack/providers/remote/inference/databricks/databricks.py
+++ b/src/llama_stack/providers/remote/inference/databricks/databricks.py
@@ -29,15 +29,21 @@ class DatabricksInferenceAdapter(OpenAIMixin):
}
def get_base_url(self) -> str:
- return f"{self.config.url}/serving-endpoints"
+ return str(self.config.base_url)
async def list_provider_model_ids(self) -> Iterable[str]:
# Filter out None values from endpoint names
api_token = self._get_api_key_from_config_or_provider_data()
+ # WorkspaceClient expects base host without /serving-endpoints suffix
+ base_url_str = str(self.config.base_url)
+ if base_url_str.endswith("/serving-endpoints"):
+ host = base_url_str[:-18] # Remove '/serving-endpoints'
+ else:
+ host = base_url_str
return [
endpoint.name # type: ignore[misc]
for endpoint in WorkspaceClient(
- host=self.config.url, token=api_token
+ host=host, token=api_token
).serving_endpoints.list() # TODO: this is not async
]
diff --git a/src/llama_stack/providers/remote/inference/fireworks/config.py b/src/llama_stack/providers/remote/inference/fireworks/config.py
index e36c76054..c59b5f270 100644
--- a/src/llama_stack/providers/remote/inference/fireworks/config.py
+++ b/src/llama_stack/providers/remote/inference/fireworks/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import Field
+from pydantic import Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -14,14 +14,14 @@ from llama_stack_api import json_schema_type
@json_schema_type
class FireworksImplConfig(RemoteInferenceProviderConfig):
- url: str = Field(
- default="https://api.fireworks.ai/inference/v1",
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl("https://api.fireworks.ai/inference/v1"),
description="The URL for the Fireworks server",
)
@classmethod
def sample_run_config(cls, api_key: str = "${env.FIREWORKS_API_KEY:=}", **kwargs) -> dict[str, Any]:
return {
- "url": "https://api.fireworks.ai/inference/v1",
+ "base_url": "https://api.fireworks.ai/inference/v1",
"api_key": api_key,
}
diff --git a/src/llama_stack/providers/remote/inference/fireworks/fireworks.py b/src/llama_stack/providers/remote/inference/fireworks/fireworks.py
index 7e2b73546..61ea0b1f6 100644
--- a/src/llama_stack/providers/remote/inference/fireworks/fireworks.py
+++ b/src/llama_stack/providers/remote/inference/fireworks/fireworks.py
@@ -24,4 +24,4 @@ class FireworksInferenceAdapter(OpenAIMixin):
provider_data_api_key_field: str = "fireworks_api_key"
def get_base_url(self) -> str:
- return "https://api.fireworks.ai/inference/v1"
+ return str(self.config.base_url)
diff --git a/src/llama_stack/providers/remote/inference/groq/config.py b/src/llama_stack/providers/remote/inference/groq/config.py
index cca53a4e8..e5c29c271 100644
--- a/src/llama_stack/providers/remote/inference/groq/config.py
+++ b/src/llama_stack/providers/remote/inference/groq/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -21,14 +21,14 @@ class GroqProviderDataValidator(BaseModel):
@json_schema_type
class GroqConfig(RemoteInferenceProviderConfig):
- url: str = Field(
- default="https://api.groq.com",
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl("https://api.groq.com/openai/v1"),
description="The URL for the Groq AI server",
)
@classmethod
def sample_run_config(cls, api_key: str = "${env.GROQ_API_KEY:=}", **kwargs) -> dict[str, Any]:
return {
- "url": "https://api.groq.com",
+ "base_url": "https://api.groq.com/openai/v1",
"api_key": api_key,
}
diff --git a/src/llama_stack/providers/remote/inference/groq/groq.py b/src/llama_stack/providers/remote/inference/groq/groq.py
index 3a4f2626d..f99de91ca 100644
--- a/src/llama_stack/providers/remote/inference/groq/groq.py
+++ b/src/llama_stack/providers/remote/inference/groq/groq.py
@@ -15,4 +15,4 @@ class GroqInferenceAdapter(OpenAIMixin):
provider_data_api_key_field: str = "groq_api_key"
def get_base_url(self) -> str:
- return f"{self.config.url}/openai/v1"
+ return str(self.config.base_url)
diff --git a/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py b/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py
index ded210d89..a0f80d969 100644
--- a/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py
+++ b/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -21,14 +21,14 @@ class LlamaProviderDataValidator(BaseModel):
@json_schema_type
class LlamaCompatConfig(RemoteInferenceProviderConfig):
- openai_compat_api_base: str = Field(
- default="https://api.llama.com/compat/v1/",
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl("https://api.llama.com/compat/v1/"),
description="The URL for the Llama API server",
)
@classmethod
def sample_run_config(cls, api_key: str = "${env.LLAMA_API_KEY}", **kwargs) -> dict[str, Any]:
return {
- "openai_compat_api_base": "https://api.llama.com/compat/v1/",
+ "base_url": "https://api.llama.com/compat/v1/",
"api_key": api_key,
}
diff --git a/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py b/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
index a5f67ecd1..f29aebf36 100644
--- a/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
+++ b/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
@@ -31,7 +31,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
:return: The Llama API base URL
"""
- return self.config.openai_compat_api_base
+ return str(self.config.base_url)
async def openai_completion(
self,
diff --git a/src/llama_stack/providers/remote/inference/nvidia/config.py b/src/llama_stack/providers/remote/inference/nvidia/config.py
index e5b0c6b73..e1e9a0ea9 100644
--- a/src/llama_stack/providers/remote/inference/nvidia/config.py
+++ b/src/llama_stack/providers/remote/inference/nvidia/config.py
@@ -7,7 +7,7 @@
import os
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -44,18 +44,14 @@ class NVIDIAConfig(RemoteInferenceProviderConfig):
URL of your running NVIDIA NIM and do not need to set the api_key.
"""
- url: str = Field(
- default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"),
+ base_url: HttpUrl | None = Field(
+ default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com/v1"),
description="A base url for accessing the NVIDIA NIM",
)
timeout: int = Field(
default=60,
description="Timeout for the HTTP requests",
)
- append_api_version: bool = Field(
- default_factory=lambda: os.getenv("NVIDIA_APPEND_API_VERSION", "True").lower() != "false",
- description="When set to false, the API version will not be appended to the base_url. By default, it is true.",
- )
rerank_model_to_url: dict[str, str] = Field(
default_factory=lambda: {
"nv-rerank-qa-mistral-4b:1": "https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking",
@@ -68,13 +64,11 @@ class NVIDIAConfig(RemoteInferenceProviderConfig):
@classmethod
def sample_run_config(
cls,
- url: str = "${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}",
+ base_url: HttpUrl | None = "${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}",
api_key: str = "${env.NVIDIA_API_KEY:=}",
- append_api_version: bool = "${env.NVIDIA_APPEND_API_VERSION:=True}",
**kwargs,
) -> dict[str, Any]:
return {
- "url": url,
+ "base_url": base_url,
"api_key": api_key,
- "append_api_version": append_api_version,
}
diff --git a/src/llama_stack/providers/remote/inference/nvidia/nvidia.py b/src/llama_stack/providers/remote/inference/nvidia/nvidia.py
index 17f8775bf..5d0d52d6a 100644
--- a/src/llama_stack/providers/remote/inference/nvidia/nvidia.py
+++ b/src/llama_stack/providers/remote/inference/nvidia/nvidia.py
@@ -44,7 +44,7 @@ class NVIDIAInferenceAdapter(OpenAIMixin):
}
async def initialize(self) -> None:
- logger.info(f"Initializing NVIDIAInferenceAdapter({self.config.url})...")
+ logger.info(f"Initializing NVIDIAInferenceAdapter({self.config.base_url})...")
if _is_nvidia_hosted(self.config):
if not self.config.auth_credential:
@@ -72,7 +72,7 @@ class NVIDIAInferenceAdapter(OpenAIMixin):
:return: The NVIDIA API base URL
"""
- return f"{self.config.url}/v1" if self.config.append_api_version else self.config.url
+ return str(self.config.base_url)
async def list_provider_model_ids(self) -> Iterable[str]:
"""
diff --git a/src/llama_stack/providers/remote/inference/nvidia/utils.py b/src/llama_stack/providers/remote/inference/nvidia/utils.py
index 46ee939d9..c138d1fc5 100644
--- a/src/llama_stack/providers/remote/inference/nvidia/utils.py
+++ b/src/llama_stack/providers/remote/inference/nvidia/utils.py
@@ -8,4 +8,4 @@ from . import NVIDIAConfig
def _is_nvidia_hosted(config: NVIDIAConfig) -> bool:
- return "integrate.api.nvidia.com" in config.url
+ return "integrate.api.nvidia.com" in str(config.base_url)
diff --git a/src/llama_stack/providers/remote/inference/ollama/config.py b/src/llama_stack/providers/remote/inference/ollama/config.py
index 416b847a0..60dd34fa8 100644
--- a/src/llama_stack/providers/remote/inference/ollama/config.py
+++ b/src/llama_stack/providers/remote/inference/ollama/config.py
@@ -6,20 +6,22 @@
from typing import Any
-from pydantic import Field, SecretStr
+from pydantic import Field, HttpUrl, SecretStr
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
-DEFAULT_OLLAMA_URL = "http://localhost:11434"
+DEFAULT_OLLAMA_URL = "http://localhost:11434/v1"
class OllamaImplConfig(RemoteInferenceProviderConfig):
auth_credential: SecretStr | None = Field(default=None, exclude=True)
- url: str = DEFAULT_OLLAMA_URL
+ base_url: HttpUrl | None = Field(default=HttpUrl(DEFAULT_OLLAMA_URL))
@classmethod
- def sample_run_config(cls, url: str = "${env.OLLAMA_URL:=http://localhost:11434}", **kwargs) -> dict[str, Any]:
+ def sample_run_config(
+ cls, base_url: str = "${env.OLLAMA_URL:=http://localhost:11434/v1}", **kwargs
+ ) -> dict[str, Any]:
return {
- "url": url,
+ "base_url": base_url,
}
diff --git a/src/llama_stack/providers/remote/inference/ollama/ollama.py b/src/llama_stack/providers/remote/inference/ollama/ollama.py
index d1bf85361..e8b872384 100644
--- a/src/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/src/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -55,17 +55,23 @@ class OllamaInferenceAdapter(OpenAIMixin):
# ollama client attaches itself to the current event loop (sadly?)
loop = asyncio.get_running_loop()
if loop not in self._clients:
- self._clients[loop] = AsyncOllamaClient(host=self.config.url)
+ # Ollama client expects base URL without /v1 suffix
+ base_url_str = str(self.config.base_url)
+ if base_url_str.endswith("/v1"):
+ host = base_url_str[:-3]
+ else:
+ host = base_url_str
+ self._clients[loop] = AsyncOllamaClient(host=host)
return self._clients[loop]
def get_api_key(self):
return "NO KEY REQUIRED"
def get_base_url(self):
- return self.config.url.rstrip("/") + "/v1"
+ return str(self.config.base_url)
async def initialize(self) -> None:
- logger.info(f"checking connectivity to Ollama at `{self.config.url}`...")
+ logger.info(f"checking connectivity to Ollama at `{self.config.base_url}`...")
r = await self.health()
if r["status"] == HealthStatus.ERROR:
logger.warning(
diff --git a/src/llama_stack/providers/remote/inference/openai/config.py b/src/llama_stack/providers/remote/inference/openai/config.py
index ab28e571f..2057cd0d6 100644
--- a/src/llama_stack/providers/remote/inference/openai/config.py
+++ b/src/llama_stack/providers/remote/inference/openai/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -21,8 +21,8 @@ class OpenAIProviderDataValidator(BaseModel):
@json_schema_type
class OpenAIConfig(RemoteInferenceProviderConfig):
- base_url: str = Field(
- default="https://api.openai.com/v1",
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl("https://api.openai.com/v1"),
description="Base URL for OpenAI API",
)
diff --git a/src/llama_stack/providers/remote/inference/openai/openai.py b/src/llama_stack/providers/remote/inference/openai/openai.py
index 52bc48f1a..2d465546a 100644
--- a/src/llama_stack/providers/remote/inference/openai/openai.py
+++ b/src/llama_stack/providers/remote/inference/openai/openai.py
@@ -35,4 +35,4 @@ class OpenAIInferenceAdapter(OpenAIMixin):
Returns the OpenAI API base URL from the configuration.
"""
- return self.config.base_url
+ return str(self.config.base_url)
diff --git a/src/llama_stack/providers/remote/inference/passthrough/config.py b/src/llama_stack/providers/remote/inference/passthrough/config.py
index 54508b6fb..f45806e79 100644
--- a/src/llama_stack/providers/remote/inference/passthrough/config.py
+++ b/src/llama_stack/providers/remote/inference/passthrough/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import Field
+from pydantic import Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -14,16 +14,16 @@ from llama_stack_api import json_schema_type
@json_schema_type
class PassthroughImplConfig(RemoteInferenceProviderConfig):
- url: str = Field(
+ base_url: HttpUrl | None = Field(
default=None,
description="The URL for the passthrough endpoint",
)
@classmethod
def sample_run_config(
- cls, url: str = "${env.PASSTHROUGH_URL}", api_key: str = "${env.PASSTHROUGH_API_KEY}", **kwargs
+ cls, base_url: HttpUrl | None = "${env.PASSTHROUGH_URL}", api_key: str = "${env.PASSTHROUGH_API_KEY}", **kwargs
) -> dict[str, Any]:
return {
- "url": url,
+ "base_url": base_url,
"api_key": api_key,
}
diff --git a/src/llama_stack/providers/remote/inference/passthrough/passthrough.py b/src/llama_stack/providers/remote/inference/passthrough/passthrough.py
index 75eedf026..b0e2e74ad 100644
--- a/src/llama_stack/providers/remote/inference/passthrough/passthrough.py
+++ b/src/llama_stack/providers/remote/inference/passthrough/passthrough.py
@@ -82,8 +82,8 @@ class PassthroughInferenceAdapter(NeedsRequestProviderData, Inference):
def _get_passthrough_url(self) -> str:
"""Get the passthrough URL from config or provider data."""
- if self.config.url is not None:
- return self.config.url
+ if self.config.base_url is not None:
+ return str(self.config.base_url)
provider_data = self.get_request_provider_data()
if provider_data is None:
diff --git a/src/llama_stack/providers/remote/inference/runpod/config.py b/src/llama_stack/providers/remote/inference/runpod/config.py
index 2ee56ca94..8d06f5263 100644
--- a/src/llama_stack/providers/remote/inference/runpod/config.py
+++ b/src/llama_stack/providers/remote/inference/runpod/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import BaseModel, Field, SecretStr
+from pydantic import BaseModel, Field, HttpUrl, SecretStr
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -21,7 +21,7 @@ class RunpodProviderDataValidator(BaseModel):
@json_schema_type
class RunpodImplConfig(RemoteInferenceProviderConfig):
- url: str | None = Field(
+ base_url: HttpUrl | None = Field(
default=None,
description="The URL for the Runpod model serving endpoint",
)
@@ -34,6 +34,6 @@ class RunpodImplConfig(RemoteInferenceProviderConfig):
@classmethod
def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
return {
- "url": "${env.RUNPOD_URL:=}",
+ "base_url": "${env.RUNPOD_URL:=}",
"api_token": "${env.RUNPOD_API_TOKEN}",
}
diff --git a/src/llama_stack/providers/remote/inference/runpod/runpod.py b/src/llama_stack/providers/remote/inference/runpod/runpod.py
index 9c770cc24..04ad12851 100644
--- a/src/llama_stack/providers/remote/inference/runpod/runpod.py
+++ b/src/llama_stack/providers/remote/inference/runpod/runpod.py
@@ -28,7 +28,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
def get_base_url(self) -> str:
"""Get base URL for OpenAI client."""
- return self.config.url
+ return str(self.config.base_url)
async def openai_chat_completion(
self,
diff --git a/src/llama_stack/providers/remote/inference/sambanova/config.py b/src/llama_stack/providers/remote/inference/sambanova/config.py
index 93679ba99..79cda75a0 100644
--- a/src/llama_stack/providers/remote/inference/sambanova/config.py
+++ b/src/llama_stack/providers/remote/inference/sambanova/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -21,14 +21,14 @@ class SambaNovaProviderDataValidator(BaseModel):
@json_schema_type
class SambaNovaImplConfig(RemoteInferenceProviderConfig):
- url: str = Field(
- default="https://api.sambanova.ai/v1",
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl("https://api.sambanova.ai/v1"),
description="The URL for the SambaNova AI server",
)
@classmethod
def sample_run_config(cls, api_key: str = "${env.SAMBANOVA_API_KEY:=}", **kwargs) -> dict[str, Any]:
return {
- "url": "https://api.sambanova.ai/v1",
+ "base_url": "https://api.sambanova.ai/v1",
"api_key": api_key,
}
diff --git a/src/llama_stack/providers/remote/inference/sambanova/sambanova.py b/src/llama_stack/providers/remote/inference/sambanova/sambanova.py
index daa4b1670..cb01e3a90 100644
--- a/src/llama_stack/providers/remote/inference/sambanova/sambanova.py
+++ b/src/llama_stack/providers/remote/inference/sambanova/sambanova.py
@@ -25,4 +25,4 @@ class SambaNovaInferenceAdapter(OpenAIMixin):
:return: The SambaNova base URL
"""
- return self.config.url
+ return str(self.config.base_url)
diff --git a/src/llama_stack/providers/remote/inference/tgi/config.py b/src/llama_stack/providers/remote/inference/tgi/config.py
index 74edc8523..44cb4b812 100644
--- a/src/llama_stack/providers/remote/inference/tgi/config.py
+++ b/src/llama_stack/providers/remote/inference/tgi/config.py
@@ -5,7 +5,7 @@
# the root directory of this source tree.
-from pydantic import BaseModel, Field, SecretStr
+from pydantic import BaseModel, Field, HttpUrl, SecretStr
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -15,18 +15,19 @@ from llama_stack_api import json_schema_type
class TGIImplConfig(RemoteInferenceProviderConfig):
auth_credential: SecretStr | None = Field(default=None, exclude=True)
- url: str = Field(
- description="The URL for the TGI serving endpoint",
+ base_url: HttpUrl | None = Field(
+ default=None,
+ description="The URL for the TGI serving endpoint (should include /v1 path)",
)
@classmethod
def sample_run_config(
cls,
- url: str = "${env.TGI_URL:=}",
+ base_url: str = "${env.TGI_URL:=}",
**kwargs,
):
return {
- "url": url,
+ "base_url": base_url,
}
diff --git a/src/llama_stack/providers/remote/inference/tgi/tgi.py b/src/llama_stack/providers/remote/inference/tgi/tgi.py
index dd47ccc62..5dc8c33f7 100644
--- a/src/llama_stack/providers/remote/inference/tgi/tgi.py
+++ b/src/llama_stack/providers/remote/inference/tgi/tgi.py
@@ -8,7 +8,7 @@
from collections.abc import Iterable
from huggingface_hub import AsyncInferenceClient, HfApi
-from pydantic import SecretStr
+from pydantic import HttpUrl, SecretStr
from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -23,7 +23,7 @@ log = get_logger(name=__name__, category="inference::tgi")
class _HfAdapter(OpenAIMixin):
- url: str
+ base_url: HttpUrl
api_key: SecretStr
hf_client: AsyncInferenceClient
@@ -36,7 +36,7 @@ class _HfAdapter(OpenAIMixin):
return "NO KEY REQUIRED"
def get_base_url(self):
- return self.url
+ return self.base_url
async def list_provider_model_ids(self) -> Iterable[str]:
return [self.model_id]
@@ -50,14 +50,20 @@ class _HfAdapter(OpenAIMixin):
class TGIAdapter(_HfAdapter):
async def initialize(self, config: TGIImplConfig) -> None:
- if not config.url:
+ if not config.base_url:
raise ValueError("You must provide a URL in run.yaml (or via the TGI_URL environment variable) to use TGI.")
- log.info(f"Initializing TGI client with url={config.url}")
- self.hf_client = AsyncInferenceClient(model=config.url, provider="hf-inference")
+ log.info(f"Initializing TGI client with url={config.base_url}")
+ # Extract base URL without /v1 for HF client initialization
+ base_url_str = str(config.base_url).rstrip("/")
+ if base_url_str.endswith("/v1"):
+ base_url_for_client = base_url_str[:-3]
+ else:
+ base_url_for_client = base_url_str
+ self.hf_client = AsyncInferenceClient(model=base_url_for_client, provider="hf-inference")
endpoint_info = await self.hf_client.get_endpoint_info()
self.max_tokens = endpoint_info["max_total_tokens"]
self.model_id = endpoint_info["model_id"]
- self.url = f"{config.url.rstrip('/')}/v1"
+ self.base_url = config.base_url
self.api_key = SecretStr("NO_KEY")
diff --git a/src/llama_stack/providers/remote/inference/together/config.py b/src/llama_stack/providers/remote/inference/together/config.py
index c1b3c4a55..16f0686ba 100644
--- a/src/llama_stack/providers/remote/inference/together/config.py
+++ b/src/llama_stack/providers/remote/inference/together/config.py
@@ -6,7 +6,7 @@
from typing import Any
-from pydantic import Field
+from pydantic import Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -14,14 +14,14 @@ from llama_stack_api import json_schema_type
@json_schema_type
class TogetherImplConfig(RemoteInferenceProviderConfig):
- url: str = Field(
- default="https://api.together.xyz/v1",
+ base_url: HttpUrl | None = Field(
+ default=HttpUrl("https://api.together.xyz/v1"),
description="The URL for the Together AI server",
)
@classmethod
def sample_run_config(cls, **kwargs) -> dict[str, Any]:
return {
- "url": "https://api.together.xyz/v1",
+ "base_url": "https://api.together.xyz/v1",
"api_key": "${env.TOGETHER_API_KEY:=}",
}
diff --git a/src/llama_stack/providers/remote/inference/together/together.py b/src/llama_stack/providers/remote/inference/together/together.py
index cd34aec5e..0826dbcd2 100644
--- a/src/llama_stack/providers/remote/inference/together/together.py
+++ b/src/llama_stack/providers/remote/inference/together/together.py
@@ -9,7 +9,6 @@ from collections.abc import Iterable
from typing import Any, cast
from together import AsyncTogether # type: ignore[import-untyped]
-from together.constants import BASE_URL # type: ignore[import-untyped]
from llama_stack.core.request_headers import NeedsRequestProviderData
from llama_stack.log import get_logger
@@ -42,7 +41,7 @@ class TogetherInferenceAdapter(OpenAIMixin, NeedsRequestProviderData):
provider_data_api_key_field: str = "together_api_key"
def get_base_url(self):
- return BASE_URL
+ return str(self.config.base_url)
def _get_client(self) -> AsyncTogether:
together_api_key = None
diff --git a/src/llama_stack/providers/remote/inference/vertexai/vertexai.py b/src/llama_stack/providers/remote/inference/vertexai/vertexai.py
index b91430fd0..7941f8c89 100644
--- a/src/llama_stack/providers/remote/inference/vertexai/vertexai.py
+++ b/src/llama_stack/providers/remote/inference/vertexai/vertexai.py
@@ -51,4 +51,4 @@ class VertexAIInferenceAdapter(OpenAIMixin):
:return: An iterable of model IDs
"""
- return ["vertexai/gemini-2.0-flash", "vertexai/gemini-2.5-flash", "vertexai/gemini-2.5-pro"]
+ return ["google/gemini-2.0-flash", "google/gemini-2.5-flash", "google/gemini-2.5-pro"]
diff --git a/src/llama_stack/providers/remote/inference/vllm/config.py b/src/llama_stack/providers/remote/inference/vllm/config.py
index c43533ee4..db6c74431 100644
--- a/src/llama_stack/providers/remote/inference/vllm/config.py
+++ b/src/llama_stack/providers/remote/inference/vllm/config.py
@@ -6,7 +6,7 @@
from pathlib import Path
-from pydantic import Field, SecretStr, field_validator
+from pydantic import Field, HttpUrl, SecretStr, field_validator
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -14,7 +14,7 @@ from llama_stack_api import json_schema_type
@json_schema_type
class VLLMInferenceAdapterConfig(RemoteInferenceProviderConfig):
- url: str | None = Field(
+ base_url: HttpUrl | None = Field(
default=None,
description="The URL for the vLLM model serving endpoint",
)
@@ -48,11 +48,11 @@ class VLLMInferenceAdapterConfig(RemoteInferenceProviderConfig):
@classmethod
def sample_run_config(
cls,
- url: str = "${env.VLLM_URL:=}",
+ base_url: str = "${env.VLLM_URL:=}",
**kwargs,
):
return {
- "url": url,
+ "base_url": base_url,
"max_tokens": "${env.VLLM_MAX_TOKENS:=4096}",
"api_token": "${env.VLLM_API_TOKEN:=fake}",
"tls_verify": "${env.VLLM_TLS_VERIFY:=true}",
diff --git a/src/llama_stack/providers/remote/inference/vllm/vllm.py b/src/llama_stack/providers/remote/inference/vllm/vllm.py
index 1510e9384..6664ca36b 100644
--- a/src/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/src/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -39,12 +39,12 @@ class VLLMInferenceAdapter(OpenAIMixin):
def get_base_url(self) -> str:
"""Get the base URL from config."""
- if not self.config.url:
+ if not self.config.base_url:
raise ValueError("No base URL configured")
- return self.config.url
+ return str(self.config.base_url)
async def initialize(self) -> None:
- if not self.config.url:
+ if not self.config.base_url:
raise ValueError(
"You must provide a URL in run.yaml (or via the VLLM_URL environment variable) to use vLLM."
)
diff --git a/src/llama_stack/providers/remote/inference/watsonx/config.py b/src/llama_stack/providers/remote/inference/watsonx/config.py
index 914f80820..be2b2c0ab 100644
--- a/src/llama_stack/providers/remote/inference/watsonx/config.py
+++ b/src/llama_stack/providers/remote/inference/watsonx/config.py
@@ -7,7 +7,7 @@
import os
from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, HttpUrl
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
@@ -23,7 +23,7 @@ class WatsonXProviderDataValidator(BaseModel):
@json_schema_type
class WatsonXConfig(RemoteInferenceProviderConfig):
- url: str = Field(
+ base_url: HttpUrl | None = Field(
default_factory=lambda: os.getenv("WATSONX_BASE_URL", "https://us-south.ml.cloud.ibm.com"),
description="A base url for accessing the watsonx.ai",
)
@@ -39,7 +39,7 @@ class WatsonXConfig(RemoteInferenceProviderConfig):
@classmethod
def sample_run_config(cls, **kwargs) -> dict[str, Any]:
return {
- "url": "${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}",
+ "base_url": "${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}",
"api_key": "${env.WATSONX_API_KEY:=}",
"project_id": "${env.WATSONX_PROJECT_ID:=}",
}
diff --git a/src/llama_stack/providers/remote/inference/watsonx/watsonx.py b/src/llama_stack/providers/remote/inference/watsonx/watsonx.py
index aab9e2dca..5684f6c17 100644
--- a/src/llama_stack/providers/remote/inference/watsonx/watsonx.py
+++ b/src/llama_stack/providers/remote/inference/watsonx/watsonx.py
@@ -255,7 +255,7 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin):
)
def get_base_url(self) -> str:
- return self.config.url
+ return str(self.config.base_url)
# Copied from OpenAIMixin
async def check_model_availability(self, model: str) -> bool:
@@ -316,7 +316,7 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin):
"""
Retrieves foundation model specifications from the watsonx.ai API.
"""
- url = f"{self.config.url}/ml/v1/foundation_model_specs?version=2023-10-25"
+ url = f"{str(self.config.base_url)}/ml/v1/foundation_model_specs?version=2023-10-25"
headers = {
# Note that there is no authorization header. Listing models does not require authentication.
"Content-Type": "application/json",
diff --git a/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py b/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
index 649bddecb..97b044dbf 100644
--- a/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
+++ b/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
@@ -48,16 +48,10 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
if mcp_endpoint is None:
raise ValueError("mcp_endpoint is required")
- # Phase 1: Support both old header-based auth AND new authorization parameter
- # Get headers and auth from provider data (old approach)
- provider_headers, provider_auth = await self.get_headers_from_request(mcp_endpoint.uri)
+ # Get other headers from provider data (but NOT authorization)
+ provider_headers = await self.get_headers_from_request(mcp_endpoint.uri)
- # New authorization parameter takes precedence over provider data
- final_authorization = authorization or provider_auth
-
- return await list_mcp_tools(
- endpoint=mcp_endpoint.uri, headers=provider_headers, authorization=final_authorization
- )
+ return await list_mcp_tools(endpoint=mcp_endpoint.uri, headers=provider_headers, authorization=authorization)
async def invoke_tool(
self, tool_name: str, kwargs: dict[str, Any], authorization: str | None = None
@@ -69,39 +63,38 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
if urlparse(endpoint).scheme not in ("http", "https"):
raise ValueError(f"Endpoint {endpoint} is not a valid HTTP(S) URL")
- # Phase 1: Support both old header-based auth AND new authorization parameter
- # Get headers and auth from provider data (old approach)
- provider_headers, provider_auth = await self.get_headers_from_request(endpoint)
-
- # New authorization parameter takes precedence over provider data
- final_authorization = authorization or provider_auth
+ # Get other headers from provider data (but NOT authorization)
+ provider_headers = await self.get_headers_from_request(endpoint)
return await invoke_mcp_tool(
endpoint=endpoint,
tool_name=tool_name,
kwargs=kwargs,
headers=provider_headers,
- authorization=final_authorization,
+ authorization=authorization,
)
- async def get_headers_from_request(self, mcp_endpoint_uri: str) -> tuple[dict[str, str], str | None]:
+ async def get_headers_from_request(self, mcp_endpoint_uri: str) -> dict[str, str]:
"""
- Extract headers and authorization from request provider data (Phase 1 backward compatibility).
+ Extract headers from request provider data, excluding authorization.
- Phase 1: Temporarily allows Authorization to be passed via mcp_headers for backward compatibility.
- Phase 2: Will enforce that Authorization should use the dedicated authorization parameter instead.
+ Authorization must be provided via the dedicated authorization parameter.
+ If Authorization is found in mcp_headers, raise an error to guide users to the correct approach.
+
+ Args:
+ mcp_endpoint_uri: The MCP endpoint URI to match against provider data
Returns:
- Tuple of (headers_dict, authorization_token)
- - headers_dict: All headers except Authorization
- - authorization_token: Token from Authorization header (with "Bearer " prefix removed), or None
+ dict[str, str]: Headers dictionary (without Authorization)
+
+ Raises:
+ ValueError: If Authorization header is found in mcp_headers
"""
def canonicalize_uri(uri: str) -> str:
return f"{urlparse(uri).netloc or ''}/{urlparse(uri).path or ''}"
headers = {}
- authorization = None
provider_data = self.get_request_provider_data()
if provider_data and hasattr(provider_data, "mcp_headers") and provider_data.mcp_headers:
@@ -109,17 +102,14 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
if canonicalize_uri(uri) != canonicalize_uri(mcp_endpoint_uri):
continue
- # Phase 1: Extract Authorization from mcp_headers for backward compatibility
- # (Phase 2 will reject this and require the dedicated authorization parameter)
+ # Reject Authorization in mcp_headers - must use authorization parameter
for key in values.keys():
if key.lower() == "authorization":
- # Extract authorization token and strip "Bearer " prefix if present
- auth_value = values[key]
- if auth_value.startswith("Bearer "):
- authorization = auth_value[7:] # Remove "Bearer " prefix
- else:
- authorization = auth_value
- else:
- headers[key] = values[key]
+ raise ValueError(
+ "Authorization cannot be provided via mcp_headers in provider_data. "
+ "Please use the dedicated 'authorization' parameter instead. "
+ "Example: tool_runtime.invoke_tool(..., authorization='your-token')"
+ )
+ headers[key] = values[key]
- return headers, authorization
+ return headers
diff --git a/src/llama_stack/providers/remote/vector_io/chroma/chroma.py b/src/llama_stack/providers/remote/vector_io/chroma/chroma.py
index 645b40661..491db6d4d 100644
--- a/src/llama_stack/providers/remote/vector_io/chroma/chroma.py
+++ b/src/llama_stack/providers/remote/vector_io/chroma/chroma.py
@@ -11,10 +11,9 @@ from urllib.parse import urlparse
import chromadb
from numpy.typing import NDArray
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack_api import (
@@ -27,6 +26,7 @@ from llama_stack_api import (
VectorStore,
VectorStoresProtocolPrivate,
)
+from llama_stack_api.internal.kvstore import KVStore
from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig
diff --git a/src/llama_stack/providers/remote/vector_io/milvus/milvus.py b/src/llama_stack/providers/remote/vector_io/milvus/milvus.py
index aefa20317..044d678fa 100644
--- a/src/llama_stack/providers/remote/vector_io/milvus/milvus.py
+++ b/src/llama_stack/providers/remote/vector_io/milvus/milvus.py
@@ -11,10 +11,9 @@ from typing import Any
from numpy.typing import NDArray
from pymilvus import AnnSearchRequest, DataType, Function, FunctionType, MilvusClient, RRFRanker, WeightedRanker
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.inline.vector_io.milvus import MilvusVectorIOConfig as InlineMilvusVectorIOConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
RERANKER_TYPE_WEIGHTED,
@@ -34,6 +33,7 @@ from llama_stack_api import (
VectorStoreNotFoundError,
VectorStoresProtocolPrivate,
)
+from llama_stack_api.internal.kvstore import KVStore
from .config import MilvusVectorIOConfig as RemoteMilvusVectorIOConfig
diff --git a/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py b/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py
index 2901bad97..5c86fb08d 100644
--- a/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py
+++ b/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py
@@ -13,10 +13,9 @@ from psycopg2 import sql
from psycopg2.extras import Json, execute_values
from pydantic import BaseModel, TypeAdapter
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack.providers.utils.vector_io.vector_utils import WeightedInMemoryAggregator, sanitize_collection_name
@@ -31,6 +30,7 @@ from llama_stack_api import (
VectorStoreNotFoundError,
VectorStoresProtocolPrivate,
)
+from llama_stack_api.internal.kvstore import KVStore
from .config import PGVectorVectorIOConfig
diff --git a/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py b/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py
index 20ab653d0..4dd78d834 100644
--- a/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py
+++ b/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py
@@ -13,9 +13,9 @@ from numpy.typing import NDArray
from qdrant_client import AsyncQdrantClient, models
from qdrant_client.models import PointStruct
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack_api import (
diff --git a/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py b/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py
index ba3e6b7ea..c15d5f468 100644
--- a/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py
+++ b/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py
@@ -13,9 +13,8 @@ from weaviate.classes.init import Auth
from weaviate.classes.query import Filter, HybridFusion
from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
RERANKER_TYPE_RRF,
@@ -35,6 +34,7 @@ from llama_stack_api import (
VectorStoreNotFoundError,
VectorStoresProtocolPrivate,
)
+from llama_stack_api.internal.kvstore import KVStore
from .config import WeaviateVectorIOConfig
diff --git a/src/llama_stack/providers/utils/inference/inference_store.py b/src/llama_stack/providers/utils/inference/inference_store.py
index 49e3af7a1..a8a0cace4 100644
--- a/src/llama_stack/providers/utils/inference/inference_store.py
+++ b/src/llama_stack/providers/utils/inference/inference_store.py
@@ -10,6 +10,8 @@ from sqlalchemy.exc import IntegrityError
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.storage.datatypes import InferenceStoreReference, StorageBackendType
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import _SQLSTORE_BACKENDS, sqlstore_impl
from llama_stack.log import get_logger
from llama_stack_api import (
ListOpenAIChatCompletionResponse,
@@ -18,10 +20,7 @@ from llama_stack_api import (
OpenAIMessageParam,
Order,
)
-
-from ..sqlstore.api import ColumnDefinition, ColumnType
-from ..sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from ..sqlstore.sqlstore import _SQLSTORE_BACKENDS, sqlstore_impl
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
logger = get_logger(name=__name__, category="inference")
diff --git a/src/llama_stack/providers/utils/inference/openai_compat.py b/src/llama_stack/providers/utils/inference/openai_compat.py
index 32d41ffde..3ce7d361d 100644
--- a/src/llama_stack/providers/utils/inference/openai_compat.py
+++ b/src/llama_stack/providers/utils/inference/openai_compat.py
@@ -3,23 +3,10 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from collections.abc import Iterable
from typing import (
Any,
)
-from openai.types.chat import (
- ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam,
-)
-
-try:
- from openai.types.chat import (
- ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
- )
-except ImportError:
- from openai.types.chat.chat_completion_message_tool_call import (
- ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
- )
from openai.types.chat import (
ChatCompletionMessageToolCall,
)
@@ -32,18 +19,6 @@ from llama_stack.models.llama.datatypes import (
ToolCall,
ToolDefinition,
)
-from llama_stack_api import (
- URL,
- GreedySamplingStrategy,
- ImageContentItem,
- JsonSchemaResponseFormat,
- OpenAIResponseFormatParam,
- SamplingParams,
- TextContentItem,
- TopKSamplingStrategy,
- TopPSamplingStrategy,
- _URLOrData,
-)
logger = get_logger(name=__name__, category="providers::utils")
@@ -73,42 +48,6 @@ class OpenAICompatCompletionResponse(BaseModel):
choices: list[OpenAICompatCompletionChoice]
-def get_sampling_strategy_options(params: SamplingParams) -> dict:
- options = {}
- if isinstance(params.strategy, GreedySamplingStrategy):
- options["temperature"] = 0.0
- elif isinstance(params.strategy, TopPSamplingStrategy):
- if params.strategy.temperature is not None:
- options["temperature"] = params.strategy.temperature
- if params.strategy.top_p is not None:
- options["top_p"] = params.strategy.top_p
- elif isinstance(params.strategy, TopKSamplingStrategy):
- options["top_k"] = params.strategy.top_k
- else:
- raise ValueError(f"Unsupported sampling strategy: {params.strategy}")
-
- return options
-
-
-def get_sampling_options(params: SamplingParams | None) -> dict:
- if not params:
- return {}
-
- options = {}
- if params:
- options.update(get_sampling_strategy_options(params))
- if params.max_tokens:
- options["max_tokens"] = params.max_tokens
-
- if params.repetition_penalty is not None and params.repetition_penalty != 1.0:
- options["repeat_penalty"] = params.repetition_penalty
-
- if params.stop is not None:
- options["stop"] = params.stop
-
- return options
-
-
def text_from_choice(choice) -> str:
if hasattr(choice, "delta") and choice.delta:
return choice.delta.content # type: ignore[no-any-return] # external OpenAI types lack precise annotations
@@ -253,154 +192,6 @@ def convert_tooldef_to_openai_tool(tool: ToolDefinition) -> dict:
return out
-def _convert_stop_reason_to_openai_finish_reason(stop_reason: StopReason) -> str:
- """
- Convert a StopReason to an OpenAI chat completion finish_reason.
- """
- return {
- StopReason.end_of_turn: "stop",
- StopReason.end_of_message: "tool_calls",
- StopReason.out_of_tokens: "length",
- }.get(stop_reason, "stop")
-
-
-def _convert_openai_finish_reason(finish_reason: str) -> StopReason:
- """
- Convert an OpenAI chat completion finish_reason to a StopReason.
-
- finish_reason: Literal["stop", "length", "tool_calls", ...]
- - stop: model hit a natural stop point or a provided stop sequence
- - length: maximum number of tokens specified in the request was reached
- - tool_calls: model called a tool
-
- ->
-
- class StopReason(Enum):
- end_of_turn = "end_of_turn"
- end_of_message = "end_of_message"
- out_of_tokens = "out_of_tokens"
- """
-
- # TODO(mf): are end_of_turn and end_of_message semantics correct?
- return {
- "stop": StopReason.end_of_turn,
- "length": StopReason.out_of_tokens,
- "tool_calls": StopReason.end_of_message,
- }.get(finish_reason, StopReason.end_of_turn)
-
-
-def _convert_openai_request_tools(tools: list[dict[str, Any]] | None = None) -> list[ToolDefinition]:
- lls_tools: list[ToolDefinition] = []
- if not tools:
- return lls_tools
-
- for tool in tools:
- tool_fn = tool.get("function", {})
- tool_name = tool_fn.get("name", None)
- tool_desc = tool_fn.get("description", None)
- tool_params = tool_fn.get("parameters", None)
-
- lls_tool = ToolDefinition(
- tool_name=tool_name,
- description=tool_desc,
- input_schema=tool_params, # Pass through entire JSON Schema
- )
- lls_tools.append(lls_tool)
- return lls_tools
-
-
-def _convert_openai_request_response_format(
- response_format: OpenAIResponseFormatParam | None = None,
-):
- if not response_format:
- return None
- # response_format can be a dict or a pydantic model
- response_format_dict = dict(response_format) # type: ignore[arg-type] # OpenAIResponseFormatParam union needs dict conversion
- if response_format_dict.get("type", "") == "json_schema":
- return JsonSchemaResponseFormat(
- type="json_schema", # type: ignore[arg-type] # Literal["json_schema"] incompatible with expected type
- json_schema=response_format_dict.get("json_schema", {}).get("schema", ""),
- )
- return None
-
-
-def _convert_openai_tool_calls(
- tool_calls: list[OpenAIChatCompletionMessageFunctionToolCall],
-) -> list[ToolCall]:
- """
- Convert an OpenAI ChatCompletionMessageToolCall list into a list of ToolCall.
-
- OpenAI ChatCompletionMessageToolCall:
- id: str
- function: Function
- type: Literal["function"]
-
- OpenAI Function:
- arguments: str
- name: str
-
- ->
-
- ToolCall:
- call_id: str
- tool_name: str
- arguments: Dict[str, ...]
- """
- if not tool_calls:
- return [] # CompletionMessage tool_calls is not optional
-
- return [
- ToolCall(
- call_id=call.id,
- tool_name=call.function.name,
- arguments=call.function.arguments,
- )
- for call in tool_calls
- ]
-
-
-def _convert_openai_sampling_params(
- max_tokens: int | None = None,
- temperature: float | None = None,
- top_p: float | None = None,
-) -> SamplingParams:
- sampling_params = SamplingParams()
-
- if max_tokens:
- sampling_params.max_tokens = max_tokens
-
- # Map an explicit temperature of 0 to greedy sampling
- if temperature == 0:
- sampling_params.strategy = GreedySamplingStrategy()
- else:
- # OpenAI defaults to 1.0 for temperature and top_p if unset
- if temperature is None:
- temperature = 1.0
- if top_p is None:
- top_p = 1.0
- sampling_params.strategy = TopPSamplingStrategy(temperature=temperature, top_p=top_p) # type: ignore[assignment] # SamplingParams.strategy union accepts this type
-
- return sampling_params
-
-
-def openai_content_to_content(content: str | Iterable[OpenAIChatCompletionContentPartParam] | None):
- if content is None:
- return ""
- if isinstance(content, str):
- return content
- elif isinstance(content, list):
- return [openai_content_to_content(c) for c in content]
- elif hasattr(content, "type"):
- if content.type == "text":
- return TextContentItem(type="text", text=content.text) # type: ignore[attr-defined] # Iterable narrowed by hasattr check but mypy doesn't track
- elif content.type == "image_url":
- return ImageContentItem(type="image", image=_URLOrData(url=URL(uri=content.image_url.url))) # type: ignore[attr-defined] # Iterable narrowed by hasattr check but mypy doesn't track
- else:
- raise ValueError(f"Unknown content type: {content.type}")
- else:
- raise ValueError(f"Unknown content type: {content}")
-
-
async def prepare_openai_completion_params(**params):
async def _prepare_value(value: Any) -> Any:
new_value = value
diff --git a/src/llama_stack/providers/utils/inference/openai_mixin.py b/src/llama_stack/providers/utils/inference/openai_mixin.py
index 559ac90ce..30511a341 100644
--- a/src/llama_stack/providers/utils/inference/openai_mixin.py
+++ b/src/llama_stack/providers/utils/inference/openai_mixin.py
@@ -213,6 +213,19 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
return api_key
+ def _validate_model_allowed(self, provider_model_id: str) -> None:
+ """
+ Validate that the model is in the allowed_models list if configured.
+
+ :param provider_model_id: The provider-specific model ID to validate
+ :raises ValueError: If the model is not in the allowed_models list
+ """
+ if self.config.allowed_models is not None and provider_model_id not in self.config.allowed_models:
+ raise ValueError(
+ f"Model '{provider_model_id}' is not in the allowed models list. "
+ f"Allowed models: {self.config.allowed_models}"
+ )
+
async def _get_provider_model_id(self, model: str) -> str:
"""
Get the provider-specific model ID from the model store.
@@ -259,8 +272,11 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
Direct OpenAI completion API call.
"""
# TODO: fix openai_completion to return type compatible with OpenAI's API response
+ provider_model_id = await self._get_provider_model_id(params.model)
+ self._validate_model_allowed(provider_model_id)
+
completion_kwargs = await prepare_openai_completion_params(
- model=await self._get_provider_model_id(params.model),
+ model=provider_model_id,
prompt=params.prompt,
best_of=params.best_of,
echo=params.echo,
@@ -292,6 +308,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
"""
Direct OpenAI chat completion API call.
"""
+ provider_model_id = await self._get_provider_model_id(params.model)
+ self._validate_model_allowed(provider_model_id)
+
messages = params.messages
if self.download_images:
@@ -313,7 +332,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
messages = [await _localize_image_url(m) for m in messages]
request_params = await prepare_openai_completion_params(
- model=await self._get_provider_model_id(params.model),
+ model=provider_model_id,
messages=messages,
frequency_penalty=params.frequency_penalty,
function_call=params.function_call,
@@ -351,10 +370,13 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
"""
Direct OpenAI embeddings API call.
"""
+ provider_model_id = await self._get_provider_model_id(params.model)
+ self._validate_model_allowed(provider_model_id)
+
# Build request params conditionally to avoid NotGiven/Omit type mismatch
# The OpenAI SDK uses Omit in signatures but NOT_GIVEN has type NotGiven
request_params: dict[str, Any] = {
- "model": await self._get_provider_model_id(params.model),
+ "model": provider_model_id,
"input": params.input,
}
if params.encoding_format is not None:
diff --git a/src/llama_stack/providers/utils/kvstore/sqlite/config.py b/src/llama_stack/providers/utils/kvstore/sqlite/config.py
deleted file mode 100644
index 0f8fa0a95..000000000
--- a/src/llama_stack/providers/utils/kvstore/sqlite/config.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from pydantic import BaseModel, Field
-
-from llama_stack_api import json_schema_type
-
-
-@json_schema_type
-class SqliteControlPlaneConfig(BaseModel):
- db_path: str = Field(
- description="File path for the sqlite database",
- )
- table_name: str = Field(
- default="llamastack_control_plane",
- description="Table into which all the keys will be placed",
- )
diff --git a/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py b/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
index 540ff5940..bbfd60e25 100644
--- a/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
+++ b/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
@@ -17,7 +17,6 @@ from pydantic import TypeAdapter
from llama_stack.core.id_generation import generate_object_id
from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore.api import KVStore
from llama_stack.providers.utils.memory.vector_store import (
ChunkForDeletion,
content_from_data_and_mime_type,
@@ -53,6 +52,7 @@ from llama_stack_api import (
VectorStoreSearchResponse,
VectorStoreSearchResponsePage,
)
+from llama_stack_api.internal.kvstore import KVStore
EMBEDDING_DIMENSION = 768
diff --git a/src/llama_stack/providers/utils/responses/responses_store.py b/src/llama_stack/providers/utils/responses/responses_store.py
index f6e7c435d..0401db206 100644
--- a/src/llama_stack/providers/utils/responses/responses_store.py
+++ b/src/llama_stack/providers/utils/responses/responses_store.py
@@ -6,6 +6,8 @@
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqlStoreReference
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.log import get_logger
from llama_stack_api import (
ListOpenAIResponseInputItem,
@@ -17,10 +19,7 @@ from llama_stack_api import (
OpenAIResponseObjectWithInput,
Order,
)
-
-from ..sqlstore.api import ColumnDefinition, ColumnType
-from ..sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from ..sqlstore.sqlstore import sqlstore_impl
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
logger = get_logger(name=__name__, category="openai_responses")
diff --git a/src/llama_stack/providers/utils/sqlstore/api.py b/src/llama_stack/providers/utils/sqlstore/api.py
deleted file mode 100644
index 708fc7095..000000000
--- a/src/llama_stack/providers/utils/sqlstore/api.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from collections.abc import Mapping, Sequence
-from enum import Enum
-from typing import Any, Literal, Protocol
-
-from pydantic import BaseModel
-
-from llama_stack_api import PaginatedResponse
-
-
-class ColumnType(Enum):
- INTEGER = "INTEGER"
- STRING = "STRING"
- TEXT = "TEXT"
- FLOAT = "FLOAT"
- BOOLEAN = "BOOLEAN"
- JSON = "JSON"
- DATETIME = "DATETIME"
-
-
-class ColumnDefinition(BaseModel):
- type: ColumnType
- primary_key: bool = False
- nullable: bool = True
- default: Any = None
-
-
-class SqlStore(Protocol):
- """
- A protocol for a SQL store.
- """
-
- async def create_table(self, table: str, schema: Mapping[str, ColumnType | ColumnDefinition]) -> None:
- """
- Create a table.
- """
- pass
-
- async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None:
- """
- Insert a row or batch of rows into a table.
- """
- pass
-
- async def upsert(
- self,
- table: str,
- data: Mapping[str, Any],
- conflict_columns: list[str],
- update_columns: list[str] | None = None,
- ) -> None:
- """
- Insert a row and update specified columns when conflicts occur.
- """
- pass
-
- async def fetch_all(
- self,
- table: str,
- where: Mapping[str, Any] | None = None,
- where_sql: str | None = None,
- limit: int | None = None,
- order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
- cursor: tuple[str, str] | None = None,
- ) -> PaginatedResponse:
- """
- Fetch all rows from a table with optional cursor-based pagination.
-
- :param table: The table name
- :param where: Simple key-value WHERE conditions
- :param where_sql: Raw SQL WHERE clause for complex queries
- :param limit: Maximum number of records to return
- :param order_by: List of (column, order) tuples for sorting
- :param cursor: Tuple of (key_column, cursor_id) for pagination (None for first page)
- Requires order_by with exactly one column when used
- :return: PaginatedResult with data and has_more flag
-
- Note: Cursor pagination only supports single-column ordering for simplicity.
- Multi-column ordering is allowed without cursor but will raise an error with cursor.
- """
- pass
-
- async def fetch_one(
- self,
- table: str,
- where: Mapping[str, Any] | None = None,
- where_sql: str | None = None,
- order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
- ) -> dict[str, Any] | None:
- """
- Fetch one row from a table.
- """
- pass
-
- async def update(
- self,
- table: str,
- data: Mapping[str, Any],
- where: Mapping[str, Any],
- ) -> None:
- """
- Update a row in a table.
- """
- pass
-
- async def delete(
- self,
- table: str,
- where: Mapping[str, Any],
- ) -> None:
- """
- Delete a row from a table.
- """
- pass
-
- async def add_column_if_not_exists(
- self,
- table: str,
- column_name: str,
- column_type: ColumnType,
- nullable: bool = True,
- ) -> None:
- """
- Add a column to an existing table if the column doesn't already exist.
-
- This is useful for table migrations when adding new functionality.
- If the table doesn't exist, this method should do nothing.
- If the column already exists, this method should do nothing.
-
- :param table: Table name
- :param column_name: Name of the column to add
- :param column_type: Type of the column to add
- :param nullable: Whether the column should be nullable (default: True)
- """
- pass
diff --git a/src/llama_stack_api/agents.py b/src/llama_stack_api/agents.py
index ca0611746..9b767608a 100644
--- a/src/llama_stack_api/agents.py
+++ b/src/llama_stack_api/agents.py
@@ -72,6 +72,7 @@ class Agents(Protocol):
model: str,
prompt: OpenAIResponsePrompt | None = None,
instructions: str | None = None,
+ parallel_tool_calls: bool | None = True,
previous_response_id: str | None = None,
conversation: str | None = None,
store: bool | None = True,
diff --git a/src/llama_stack/providers/utils/sqlstore/__init__.py b/src/llama_stack_api/internal/__init__.py
similarity index 65%
rename from src/llama_stack/providers/utils/sqlstore/__init__.py
rename to src/llama_stack_api/internal/__init__.py
index 756f351d8..bbf7010c3 100644
--- a/src/llama_stack/providers/utils/sqlstore/__init__.py
+++ b/src/llama_stack_api/internal/__init__.py
@@ -3,3 +3,7 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+
+# Internal subpackage for shared interfaces that are not part of the public API.
+
+__all__: list[str] = []
diff --git a/src/llama_stack/providers/utils/kvstore/api.py b/src/llama_stack_api/internal/kvstore.py
similarity index 89%
rename from src/llama_stack/providers/utils/kvstore/api.py
rename to src/llama_stack_api/internal/kvstore.py
index d17dc66e1..a6d982261 100644
--- a/src/llama_stack/providers/utils/kvstore/api.py
+++ b/src/llama_stack_api/internal/kvstore.py
@@ -9,6 +9,8 @@ from typing import Protocol
class KVStore(Protocol):
+ """Protocol for simple key/value storage backends."""
+
# TODO: make the value type bytes instead of str
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None: ...
@@ -19,3 +21,6 @@ class KVStore(Protocol):
async def values_in_range(self, start_key: str, end_key: str) -> list[str]: ...
async def keys_in_range(self, start_key: str, end_key: str) -> list[str]: ...
+
+
+__all__ = ["KVStore"]
diff --git a/src/llama_stack_api/internal/sqlstore.py b/src/llama_stack_api/internal/sqlstore.py
new file mode 100644
index 000000000..ebb2d8ba2
--- /dev/null
+++ b/src/llama_stack_api/internal/sqlstore.py
@@ -0,0 +1,79 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from collections.abc import Mapping, Sequence
+from enum import Enum
+from typing import Any, Literal, Protocol
+
+from pydantic import BaseModel
+
+from llama_stack_api import PaginatedResponse
+
+
+class ColumnType(Enum):
+ INTEGER = "INTEGER"
+ STRING = "STRING"
+ TEXT = "TEXT"
+ FLOAT = "FLOAT"
+ BOOLEAN = "BOOLEAN"
+ JSON = "JSON"
+ DATETIME = "DATETIME"
+
+
+class ColumnDefinition(BaseModel):
+ type: ColumnType
+ primary_key: bool = False
+ nullable: bool = True
+ default: Any = None
+
+
+class SqlStore(Protocol):
+ """Protocol for common SQL-store functionality."""
+
+ async def create_table(self, table: str, schema: Mapping[str, ColumnType | ColumnDefinition]) -> None: ...
+
+ async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None: ...
+
+ async def upsert(
+ self,
+ table: str,
+ data: Mapping[str, Any],
+ conflict_columns: list[str],
+ update_columns: list[str] | None = None,
+ ) -> None: ...
+
+ async def fetch_all(
+ self,
+ table: str,
+ where: Mapping[str, Any] | None = None,
+ where_sql: str | None = None,
+ limit: int | None = None,
+ order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
+ cursor: tuple[str, str] | None = None,
+ ) -> PaginatedResponse: ...
+
+ async def fetch_one(
+ self,
+ table: str,
+ where: Mapping[str, Any] | None = None,
+ where_sql: str | None = None,
+ order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
+ ) -> dict[str, Any] | None: ...
+
+ async def update(self, table: str, data: Mapping[str, Any], where: Mapping[str, Any]) -> None: ...
+
+ async def delete(self, table: str, where: Mapping[str, Any]) -> None: ...
+
+ async def add_column_if_not_exists(
+ self,
+ table: str,
+ column_name: str,
+ column_type: ColumnType,
+ nullable: bool = True,
+ ) -> None: ...
+
+
+__all__ = ["ColumnDefinition", "ColumnType", "SqlStore"]
diff --git a/src/llama_stack_api/openai_responses.py b/src/llama_stack_api/openai_responses.py
index 952418f1c..e20004487 100644
--- a/src/llama_stack_api/openai_responses.py
+++ b/src/llama_stack_api/openai_responses.py
@@ -585,7 +585,7 @@ class OpenAIResponseObject(BaseModel):
:param model: Model identifier used for generation
:param object: Object type identifier, always "response"
:param output: List of generated output items (messages, tool calls, etc.)
- :param parallel_tool_calls: Whether tool calls can be executed in parallel
+ :param parallel_tool_calls: (Optional) Whether to allow more than one function tool call generated per turn.
:param previous_response_id: (Optional) ID of the previous response in a conversation
:param prompt: (Optional) Reference to a prompt template and its variables.
:param status: Current status of the response generation
@@ -605,7 +605,7 @@ class OpenAIResponseObject(BaseModel):
model: str
object: Literal["response"] = "response"
output: Sequence[OpenAIResponseOutput]
- parallel_tool_calls: bool = False
+ parallel_tool_calls: bool | None = True
previous_response_id: str | None = None
prompt: OpenAIResponsePrompt | None = None
status: str
diff --git a/src/llama_stack_api/vector_io.py b/src/llama_stack_api/vector_io.py
index bfad644cc..135468d19 100644
--- a/src/llama_stack_api/vector_io.py
+++ b/src/llama_stack_api/vector_io.py
@@ -11,7 +11,7 @@
from typing import Annotated, Any, Literal, Protocol, runtime_checkable
from fastapi import Body, Query
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
from llama_stack_api.common.tracing import telemetry_traceable
from llama_stack_api.inference import InterleavedContent
@@ -372,6 +372,65 @@ VectorStoreFileStatus = Literal["completed"] | Literal["in_progress"] | Literal[
register_schema(VectorStoreFileStatus, name="VectorStoreFileStatus")
+# VectorStoreFileAttributes type with OpenAPI constraints
+VectorStoreFileAttributes = Annotated[
+ dict[str, Annotated[str, Field(max_length=512)] | float | bool],
+ Field(
+ max_length=16,
+ json_schema_extra={
+ "propertyNames": {"type": "string", "maxLength": 64},
+ "x-oaiTypeLabel": "map",
+ },
+ description=(
+ "Set of 16 key-value pairs that can be attached to an object. This can be "
+ "useful for storing additional information about the object in a structured "
+ "format, and querying for objects via API or the dashboard. Keys are strings "
+ "with a maximum length of 64 characters. Values are strings with a maximum "
+ "length of 512 characters, booleans, or numbers."
+ ),
+ ),
+]
+
+
+def _sanitize_vector_store_attributes(metadata: dict[str, Any] | None) -> dict[str, str | float | bool]:
+ """
+ Sanitize metadata to VectorStoreFileAttributes spec (max 16 properties, primitives only).
+
+ Converts dict[str, Any] to dict[str, str | float | bool]:
+ - Preserves: str (truncated to 512 chars), bool, int/float (as float)
+ - Converts: list -> comma-separated string
+ - Filters: dict, None, other types
+ - Enforces: max 16 properties, max 64 char keys, max 512 char string values
+ """
+ if not metadata:
+ return {}
+
+ sanitized: dict[str, str | float | bool] = {}
+ for key, value in metadata.items():
+ # Enforce max 16 properties
+ if len(sanitized) >= 16:
+ break
+
+ # Enforce max 64 char keys
+ if len(key) > 64:
+ continue
+
+ # Convert to supported primitive types
+ if isinstance(value, bool):
+ sanitized[key] = value
+ elif isinstance(value, int | float):
+ sanitized[key] = float(value)
+ elif isinstance(value, str):
+ # Enforce max 512 char string values
+ sanitized[key] = value[:512] if len(value) > 512 else value
+ elif isinstance(value, list):
+ # Convert lists to comma-separated strings (max 512 chars)
+ list_str = ", ".join(str(item) for item in value)
+ sanitized[key] = list_str[:512] if len(list_str) > 512 else list_str
+
+ return sanitized
+
+
@json_schema_type
class VectorStoreFileObject(BaseModel):
"""OpenAI Vector Store File object.
@@ -389,7 +448,7 @@ class VectorStoreFileObject(BaseModel):
id: str
object: str = "vector_store.file"
- attributes: dict[str, Any] = Field(default_factory=dict)
+ attributes: VectorStoreFileAttributes = Field(default_factory=dict)
chunking_strategy: VectorStoreChunkingStrategy
created_at: int
last_error: VectorStoreFileLastError | None = None
@@ -397,6 +456,12 @@ class VectorStoreFileObject(BaseModel):
usage_bytes: int = 0
vector_store_id: str
+ @field_validator("attributes", mode="before")
+ @classmethod
+ def _validate_attributes(cls, v: dict[str, Any] | None) -> dict[str, str | float | bool]:
+ """Sanitize attributes to match VectorStoreFileAttributes OpenAPI spec."""
+ return _sanitize_vector_store_attributes(v)
+
@json_schema_type
class VectorStoreListFilesResponse(BaseModel):
diff --git a/tests/integration/README.md b/tests/integration/README.md
index f581073ae..3559b785c 100644
--- a/tests/integration/README.md
+++ b/tests/integration/README.md
@@ -211,3 +211,23 @@ def test_asymmetric_embeddings(llama_stack_client, embedding_model_id):
assert query_response.embeddings is not None
```
+
+## TypeScript Client Replays
+
+TypeScript SDK tests can run alongside Python tests when testing against `server:` stacks. Set `TS_CLIENT_PATH` to the path or version of `llama-stack-client-typescript` to enable:
+
+```bash
+# Use published npm package (responses suite)
+TS_CLIENT_PATH=^0.3.2 scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
+
+# Use local checkout from ~/.cache (recommended for development)
+git clone https://github.com/llamastack/llama-stack-client-typescript.git ~/.cache/llama-stack-client-typescript
+TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
+
+# Run base suite with TypeScript tests
+TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup ollama
+```
+
+TypeScript tests run immediately after Python tests pass, using the same replay fixtures. The mapping between Python suites/setups and TypeScript test files is defined in `tests/integration/client-typescript/suites.json`.
+
+If `TS_CLIENT_PATH` is unset, TypeScript tests are skipped entirely.
diff --git a/tests/integration/agents/test_openai_responses.py b/tests/integration/agents/test_openai_responses.py
index 057cee774..d413d5201 100644
--- a/tests/integration/agents/test_openai_responses.py
+++ b/tests/integration/agents/test_openai_responses.py
@@ -516,169 +516,3 @@ def test_response_with_instructions(openai_client, client_with_models, text_mode
# Verify instructions from previous response was not carried over to the next response
assert response_with_instructions2.instructions == instructions2
-
-
-@pytest.mark.skip(reason="Tool calling is not reliable.")
-def test_max_tool_calls_with_function_tools(openai_client, client_with_models, text_model_id):
- """Test handling of max_tool_calls with function tools in responses."""
- if isinstance(client_with_models, LlamaStackAsLibraryClient):
- pytest.skip("OpenAI responses are not supported when testing with library client yet.")
-
- client = openai_client
- max_tool_calls = 1
-
- tools = [
- {
- "type": "function",
- "name": "get_weather",
- "description": "Get weather information for a specified location",
- "parameters": {
- "type": "object",
- "properties": {
- "location": {
- "type": "string",
- "description": "The city name (e.g., 'New York', 'London')",
- },
- },
- },
- },
- {
- "type": "function",
- "name": "get_time",
- "description": "Get current time for a specified location",
- "parameters": {
- "type": "object",
- "properties": {
- "location": {
- "type": "string",
- "description": "The city name (e.g., 'New York', 'London')",
- },
- },
- },
- },
- ]
-
- # First create a response that triggers function tools
- response = client.responses.create(
- model=text_model_id,
- input="Can you tell me the weather in Paris and the current time?",
- tools=tools,
- stream=False,
- max_tool_calls=max_tool_calls,
- )
-
- # Verify we got two function calls and that the max_tool_calls do not affect function tools
- assert len(response.output) == 2
- assert response.output[0].type == "function_call"
- assert response.output[0].name == "get_weather"
- assert response.output[0].status == "completed"
- assert response.output[1].type == "function_call"
- assert response.output[1].name == "get_time"
- assert response.output[0].status == "completed"
-
- # Verify we have a valid max_tool_calls field
- assert response.max_tool_calls == max_tool_calls
-
-
-def test_max_tool_calls_invalid(openai_client, client_with_models, text_model_id):
- """Test handling of invalid max_tool_calls in responses."""
- if isinstance(client_with_models, LlamaStackAsLibraryClient):
- pytest.skip("OpenAI responses are not supported when testing with library client yet.")
-
- client = openai_client
-
- input = "Search for today's top technology news."
- invalid_max_tool_calls = 0
- tools = [
- {"type": "web_search"},
- ]
-
- # Create a response with an invalid max_tool_calls value i.e. 0
- # Handle ValueError from LLS and BadRequestError from OpenAI client
- with pytest.raises((ValueError, BadRequestError)) as excinfo:
- client.responses.create(
- model=text_model_id,
- input=input,
- tools=tools,
- stream=False,
- max_tool_calls=invalid_max_tool_calls,
- )
-
- error_message = str(excinfo.value)
- assert f"Invalid max_tool_calls={invalid_max_tool_calls}; should be >= 1" in error_message, (
- f"Expected error message about invalid max_tool_calls, got: {error_message}"
- )
-
-
-def test_max_tool_calls_with_builtin_tools(openai_client, client_with_models, text_model_id):
- """Test handling of max_tool_calls with built-in tools in responses."""
- if isinstance(client_with_models, LlamaStackAsLibraryClient):
- pytest.skip("OpenAI responses are not supported when testing with library client yet.")
-
- client = openai_client
-
- input = "Search for today's top technology and a positive news story. You MUST make exactly two separate web search calls."
- max_tool_calls = [1, 5]
- tools = [
- {"type": "web_search"},
- ]
-
- # First create a response that triggers web_search tools without max_tool_calls
- response = client.responses.create(
- model=text_model_id,
- input=input,
- tools=tools,
- stream=False,
- )
-
- # Verify we got two web search calls followed by a message
- assert len(response.output) == 3
- assert response.output[0].type == "web_search_call"
- assert response.output[0].status == "completed"
- assert response.output[1].type == "web_search_call"
- assert response.output[1].status == "completed"
- assert response.output[2].type == "message"
- assert response.output[2].status == "completed"
- assert response.output[2].role == "assistant"
-
- # Next create a response that triggers web_search tools with max_tool_calls set to 1
- response_2 = client.responses.create(
- model=text_model_id,
- input=input,
- tools=tools,
- stream=False,
- max_tool_calls=max_tool_calls[0],
- )
-
- # Verify we got one web search tool call followed by a message
- assert len(response_2.output) == 2
- assert response_2.output[0].type == "web_search_call"
- assert response_2.output[0].status == "completed"
- assert response_2.output[1].type == "message"
- assert response_2.output[1].status == "completed"
- assert response_2.output[1].role == "assistant"
-
- # Verify we have a valid max_tool_calls field
- assert response_2.max_tool_calls == max_tool_calls[0]
-
- # Finally create a response that triggers web_search tools with max_tool_calls set to 5
- response_3 = client.responses.create(
- model=text_model_id,
- input=input,
- tools=tools,
- stream=False,
- max_tool_calls=max_tool_calls[1],
- )
-
- # Verify we got two web search calls followed by a message
- assert len(response_3.output) == 3
- assert response_3.output[0].type == "web_search_call"
- assert response_3.output[0].status == "completed"
- assert response_3.output[1].type == "web_search_call"
- assert response_3.output[1].status == "completed"
- assert response_3.output[2].type == "message"
- assert response_3.output[2].status == "completed"
- assert response_3.output[2].role == "assistant"
-
- # Verify we have a valid max_tool_calls field
- assert response_3.max_tool_calls == max_tool_calls[1]
diff --git a/tests/integration/client-typescript/__tests__/inference.test.ts b/tests/integration/client-typescript/__tests__/inference.test.ts
new file mode 100644
index 000000000..b0734fed7
--- /dev/null
+++ b/tests/integration/client-typescript/__tests__/inference.test.ts
@@ -0,0 +1,104 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Integration tests for Inference API (Chat Completions).
+ * Ported from: llama-stack/tests/integration/inference/test_openai_completion.py
+ *
+ * IMPORTANT: Test cases must match EXACTLY with Python tests to use recorded API responses.
+ */
+
+import { createTestClient, requireTextModel } from '../setup';
+
+describe('Inference API - Chat Completions', () => {
+ // Test cases matching llama-stack/tests/integration/test_cases/inference/chat_completion.json
+ const chatCompletionTestCases = [
+ {
+ id: 'non_streaming_01',
+ question: 'Which planet do humans live on?',
+ expected: 'earth',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_01]',
+ },
+ {
+ id: 'non_streaming_02',
+ question: 'Which planet has rings around it with a name starting with letter S?',
+ expected: 'saturn',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_02]',
+ },
+ ];
+
+ const streamingTestCases = [
+ {
+ id: 'streaming_01',
+ question: "What's the name of the Sun in latin?",
+ expected: 'sol',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_01]',
+ },
+ {
+ id: 'streaming_02',
+ question: 'What is the name of the US captial?',
+ expected: 'washington',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_02]',
+ },
+ ];
+
+ test.each(chatCompletionTestCases)(
+ 'chat completion non-streaming: $id',
+ async ({ question, expected, testId }) => {
+ const client = createTestClient(testId);
+ const textModel = requireTextModel();
+
+ const response = await client.chat.completions.create({
+ model: textModel,
+ messages: [
+ {
+ role: 'user',
+ content: question,
+ },
+ ],
+ stream: false,
+ });
+
+ // Non-streaming responses have choices with message property
+ const choice = response.choices[0];
+ expect(choice).toBeDefined();
+ if (!choice || !('message' in choice)) {
+ throw new Error('Expected non-streaming response with message');
+ }
+ const content = choice.message.content;
+ expect(content).toBeDefined();
+ const messageContent = typeof content === 'string' ? content.toLowerCase().trim() : '';
+ expect(messageContent.length).toBeGreaterThan(0);
+ expect(messageContent).toContain(expected.toLowerCase());
+ },
+ );
+
+ test.each(streamingTestCases)('chat completion streaming: $id', async ({ question, expected, testId }) => {
+ const client = createTestClient(testId);
+ const textModel = requireTextModel();
+
+ const stream = await client.chat.completions.create({
+ model: textModel,
+ messages: [{ role: 'user', content: question }],
+ stream: true,
+ });
+
+ const streamedContent: string[] = [];
+ for await (const chunk of stream) {
+ if (chunk.choices && chunk.choices.length > 0 && chunk.choices[0]?.delta?.content) {
+ streamedContent.push(chunk.choices[0].delta.content);
+ }
+ }
+
+ expect(streamedContent.length).toBeGreaterThan(0);
+ const fullContent = streamedContent.join('').toLowerCase().trim();
+ expect(fullContent).toContain(expected.toLowerCase());
+ });
+});
diff --git a/tests/integration/client-typescript/__tests__/responses.test.ts b/tests/integration/client-typescript/__tests__/responses.test.ts
new file mode 100644
index 000000000..0fc2a3245
--- /dev/null
+++ b/tests/integration/client-typescript/__tests__/responses.test.ts
@@ -0,0 +1,132 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Integration tests for Responses API.
+ * Ported from: llama-stack/tests/integration/responses/test_basic_responses.py
+ *
+ * IMPORTANT: Test cases and IDs must match EXACTLY with Python tests to use recorded API responses.
+ */
+
+import { createTestClient, requireTextModel, getResponseOutputText } from '../setup';
+
+describe('Responses API - Basic', () => {
+ // Test cases matching llama-stack/tests/integration/responses/fixtures/test_cases.py
+ const basicTestCases = [
+ {
+ id: 'earth',
+ input: 'Which planet do humans live on?',
+ expected: 'earth',
+ // Use client_with_models fixture to match non-streaming recordings
+ testId:
+ 'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-earth]',
+ },
+ {
+ id: 'saturn',
+ input: 'Which planet has rings around it with a name starting with letter S?',
+ expected: 'saturn',
+ testId:
+ 'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-saturn]',
+ },
+ ];
+
+ test.each(basicTestCases)('non-streaming basic response: $id', async ({ input, expected, testId }) => {
+ // Create client with test_id for all requests
+ const client = createTestClient(testId);
+ const textModel = requireTextModel();
+
+ // Create a response
+ const response = await client.responses.create({
+ model: textModel,
+ input,
+ stream: false,
+ });
+
+ // Verify response has content
+ const outputText = getResponseOutputText(response).toLowerCase().trim();
+ expect(outputText.length).toBeGreaterThan(0);
+ expect(outputText).toContain(expected.toLowerCase());
+
+ // Verify usage is reported
+ expect(response.usage).toBeDefined();
+ expect(response.usage!.input_tokens).toBeGreaterThan(0);
+ expect(response.usage!.output_tokens).toBeGreaterThan(0);
+ expect(response.usage!.total_tokens).toBe(response.usage!.input_tokens + response.usage!.output_tokens);
+
+ // Verify stored response matches
+ const retrievedResponse = await client.responses.retrieve(response.id);
+ expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(response));
+
+ // Test follow-up with previous_response_id
+ const nextResponse = await client.responses.create({
+ model: textModel,
+ input: 'Repeat your previous response in all caps.',
+ previous_response_id: response.id,
+ });
+ const nextOutputText = getResponseOutputText(nextResponse).trim();
+ expect(nextOutputText).toContain(expected.toUpperCase());
+ });
+
+ test.each(basicTestCases)('streaming basic response: $id', async ({ input, expected, testId }) => {
+ // Modify test_id for streaming variant
+ const streamingTestId = testId.replace(
+ 'test_response_non_streaming_basic',
+ 'test_response_streaming_basic',
+ );
+ const client = createTestClient(streamingTestId);
+ const textModel = requireTextModel();
+
+ // Create a streaming response
+ const stream = await client.responses.create({
+ model: textModel,
+ input,
+ stream: true,
+ });
+
+ const events: any[] = [];
+ let responseId = '';
+
+ for await (const chunk of stream) {
+ events.push(chunk);
+
+ if (chunk.type === 'response.created') {
+ // Verify response.created is the first event
+ expect(events.length).toBe(1);
+ expect(chunk.response.status).toBe('in_progress');
+ responseId = chunk.response.id;
+ } else if (chunk.type === 'response.completed') {
+ // Verify response.completed comes after response.created
+ expect(events.length).toBeGreaterThanOrEqual(2);
+ expect(chunk.response.status).toBe('completed');
+ expect(chunk.response.id).toBe(responseId);
+
+ // Verify content quality
+ const outputText = getResponseOutputText(chunk.response).toLowerCase().trim();
+ expect(outputText.length).toBeGreaterThan(0);
+ expect(outputText).toContain(expected.toLowerCase());
+
+ // Verify usage is reported
+ expect(chunk.response.usage).toBeDefined();
+ expect(chunk.response.usage!.input_tokens).toBeGreaterThan(0);
+ expect(chunk.response.usage!.output_tokens).toBeGreaterThan(0);
+ expect(chunk.response.usage!.total_tokens).toBe(
+ chunk.response.usage!.input_tokens + chunk.response.usage!.output_tokens,
+ );
+ }
+ }
+
+ // Verify we got both events
+ expect(events.length).toBeGreaterThanOrEqual(2);
+ const firstEvent = events[0];
+ const lastEvent = events[events.length - 1];
+ expect(firstEvent.type).toBe('response.created');
+ expect(lastEvent.type).toBe('response.completed');
+
+ // Verify stored response matches streamed response
+ const retrievedResponse = await client.responses.retrieve(responseId);
+ expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(lastEvent.response));
+ });
+});
diff --git a/tests/integration/client-typescript/jest.integration.config.js b/tests/integration/client-typescript/jest.integration.config.js
new file mode 100644
index 000000000..769bd177a
--- /dev/null
+++ b/tests/integration/client-typescript/jest.integration.config.js
@@ -0,0 +1,31 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/** @type {import('ts-jest').JestConfigWithTsJest} */
+module.exports = {
+ preset: 'ts-jest/presets/default-esm',
+ testEnvironment: 'node',
+ extensionsToTreatAsEsm: ['.ts'],
+ moduleNameMapper: {
+ '^(\\.{1,2}/.*)\\.js$': '$1',
+ },
+ transform: {
+ '^.+\\.tsx?$': [
+ 'ts-jest',
+ {
+ useESM: true,
+ tsconfig: {
+ module: 'ES2022',
+ moduleResolution: 'bundler',
+ },
+ },
+ ],
+ },
+ testMatch: ['<rootDir>/__tests__/**/*.test.ts'],
+ setupFilesAfterEach: ['<rootDir>/setup.ts'],
+ testTimeout: 60000, // 60 seconds (integration tests can be slow)
+ watchman: false, // Disable watchman to avoid permission issues
+};
diff --git a/tests/integration/client-typescript/package-lock.json b/tests/integration/client-typescript/package-lock.json
new file mode 100644
index 000000000..f118a07e3
--- /dev/null
+++ b/tests/integration/client-typescript/package-lock.json
@@ -0,0 +1,5507 @@
+{
+ "name": "llama-stack-typescript-integration-tests",
+ "version": "0.0.1",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "llama-stack-typescript-integration-tests",
+ "version": "0.0.1",
+ "dependencies": {
+ "llama-stack-client": "^0.3.2"
+ },
+ "devDependencies": {
+ "@swc/core": "^1.3.102",
+ "@swc/jest": "^0.2.29",
+ "@types/jest": "^29.4.0",
+ "@types/node": "^20.0.0",
+ "jest": "^29.4.0",
+ "ts-jest": "^29.1.0",
+ "typescript": "^5.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-compilation-targets": "^7.27.2",
+ "@babel/helper-module-transforms": "^7.28.3",
+ "@babel/helpers": "^7.28.4",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.27.2",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/traverse": "^7.27.1",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "@babel/traverse": "^7.28.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.5"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-bigint": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz",
+ "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-static-block": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
+ "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-attributes": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz",
+ "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-meta": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
+ "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz",
+ "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-private-property-in-object": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
+ "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-top-level-await": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
+ "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-typescript": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz",
+ "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.5",
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@bcoe/v8-coverage": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
+ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@istanbuljs/load-nyc-config": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
+ "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "camelcase": "^5.3.1",
+ "find-up": "^4.1.0",
+ "get-package-type": "^0.1.0",
+ "js-yaml": "^3.13.1",
+ "resolve-from": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@istanbuljs/schema": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
+ "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@jest/console": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz",
+ "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/console/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/console/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/console/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/core": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz",
+ "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/reporters": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "exit": "^0.1.2",
+ "graceful-fs": "^4.2.9",
+ "jest-changed-files": "^29.7.0",
+ "jest-config": "^29.7.0",
+ "jest-haste-map": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-resolve-dependencies": "^29.7.0",
+ "jest-runner": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "jest-watcher": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@jest/core/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/core/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/core/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/core/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/create-cache-key-function": {
+ "version": "30.2.0",
+ "resolved": "https://registry.npmjs.org/@jest/create-cache-key-function/-/create-cache-key-function-30.2.0.tgz",
+ "integrity": "sha512-44F4l4Enf+MirJN8X/NhdGkl71k5rBYiwdVlo4HxOwbu0sHV8QKrGEedb1VUU4K3W7fBKE0HGfbn7eZm0Ti3zg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "30.2.0"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jest/environment": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz",
+ "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/environment/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/environment/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/environment/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/expect": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz",
+ "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "expect": "^29.7.0",
+ "jest-snapshot": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/expect-utils": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz",
+ "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "jest-get-type": "^29.6.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz",
+ "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@sinonjs/fake-timers": "^10.0.2",
+ "@types/node": "*",
+ "jest-message-util": "^29.7.0",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/globals": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz",
+ "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/expect": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "jest-mock": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/pattern": {
+ "version": "30.0.1",
+ "resolved": "https://registry.npmjs.org/@jest/pattern/-/pattern-30.0.1.tgz",
+ "integrity": "sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "jest-regex-util": "30.0.1"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jest/reporters": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz",
+ "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@bcoe/v8-coverage": "^0.2.3",
+ "@jest/console": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "collect-v8-coverage": "^1.0.0",
+ "exit": "^0.1.2",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "istanbul-lib-coverage": "^3.0.0",
+ "istanbul-lib-instrument": "^6.0.0",
+ "istanbul-lib-report": "^3.0.0",
+ "istanbul-lib-source-maps": "^4.0.0",
+ "istanbul-reports": "^3.1.3",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "slash": "^3.0.0",
+ "string-length": "^4.0.1",
+ "strip-ansi": "^6.0.0",
+ "v8-to-istanbul": "^9.0.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@jest/reporters/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/reporters/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/reporters/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/schemas": {
+ "version": "30.0.5",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz",
+ "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.34.0"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jest/source-map": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz",
+ "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "callsites": "^3.0.0",
+ "graceful-fs": "^4.2.9"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz",
+ "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "collect-v8-coverage": "^1.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/test-sequencer": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz",
+ "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/test-result": "^29.7.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz",
+ "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@jest/types": "^29.6.3",
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "babel-plugin-istanbul": "^6.1.1",
+ "chalk": "^4.0.0",
+ "convert-source-map": "^2.0.0",
+ "fast-json-stable-stringify": "^2.1.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "pirates": "^4.0.4",
+ "slash": "^3.0.0",
+ "write-file-atomic": "^4.0.2"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/transform/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/types": {
+ "version": "30.2.0",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.2.0.tgz",
+ "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/pattern": "30.0.1",
+ "@jest/schemas": "30.0.5",
+ "@types/istanbul-lib-coverage": "^2.0.6",
+ "@types/istanbul-reports": "^3.0.4",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.33",
+ "chalk": "^4.1.2"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@sinclair/typebox": {
+ "version": "0.34.41",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz",
+ "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@sinonjs/commons": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz",
+ "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "type-detect": "4.0.8"
+ }
+ },
+ "node_modules/@sinonjs/fake-timers": {
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz",
+ "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@sinonjs/commons": "^3.0.0"
+ }
+ },
+ "node_modules/@swc/core": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.15.2.tgz",
+ "integrity": "sha512-OQm+yJdXxvSjqGeaWhP6Ia264ogifwAO7Q12uTDVYj/Ks4jBTI4JknlcjDRAXtRhqbWsfbZyK/5RtuIPyptk3w==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@swc/counter": "^0.1.3",
+ "@swc/types": "^0.1.25"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/swc"
+ },
+ "optionalDependencies": {
+ "@swc/core-darwin-arm64": "1.15.2",
+ "@swc/core-darwin-x64": "1.15.2",
+ "@swc/core-linux-arm-gnueabihf": "1.15.2",
+ "@swc/core-linux-arm64-gnu": "1.15.2",
+ "@swc/core-linux-arm64-musl": "1.15.2",
+ "@swc/core-linux-x64-gnu": "1.15.2",
+ "@swc/core-linux-x64-musl": "1.15.2",
+ "@swc/core-win32-arm64-msvc": "1.15.2",
+ "@swc/core-win32-ia32-msvc": "1.15.2",
+ "@swc/core-win32-x64-msvc": "1.15.2"
+ },
+ "peerDependencies": {
+ "@swc/helpers": ">=0.5.17"
+ },
+ "peerDependenciesMeta": {
+ "@swc/helpers": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@swc/core-darwin-arm64": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.15.2.tgz",
+ "integrity": "sha512-Ghyz4RJv4zyXzrUC1B2MLQBbppIB5c4jMZJybX2ebdEQAvryEKp3gq1kBksCNsatKGmEgXul88SETU19sMWcrw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-darwin-x64": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.15.2.tgz",
+ "integrity": "sha512-7n/PGJOcL2QoptzL42L5xFFfXY5rFxLHnuz1foU+4ruUTG8x2IebGhtwVTpaDN8ShEv2UZObBlT1rrXTba15Zw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm-gnueabihf": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.15.2.tgz",
+ "integrity": "sha512-ZUQVCfRJ9wimuxkStRSlLwqX4TEDmv6/J+E6FicGkQ6ssLMWoKDy0cAo93HiWt/TWEee5vFhFaSQYzCuBEGO6A==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm64-gnu": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.15.2.tgz",
+ "integrity": "sha512-GZh3pYBmfnpQ+JIg+TqLuz+pM+Mjsk5VOzi8nwKn/m+GvQBsxD5ectRtxuWUxMGNG8h0lMy4SnHRqdK3/iJl7A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm64-musl": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.15.2.tgz",
+ "integrity": "sha512-5av6VYZZeneiYIodwzGMlnyVakpuYZryGzFIbgu1XP8wVylZxduEzup4eP8atiMDFmIm+s4wn8GySJmYqeJC0A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-x64-gnu": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.15.2.tgz",
+ "integrity": "sha512-1nO/UfdCLuT/uE/7oB3EZgTeZDCIa6nL72cFEpdegnqpJVNDI6Qb8U4g/4lfVPkmHq2lvxQ0L+n+JdgaZLhrRA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-x64-musl": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.15.2.tgz",
+ "integrity": "sha512-Ksfrb0Tx310kr+TLiUOvB/I80lyZ3lSOp6cM18zmNRT/92NB4mW8oX2Jo7K4eVEI2JWyaQUAFubDSha2Q+439A==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-arm64-msvc": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.15.2.tgz",
+ "integrity": "sha512-IzUb5RlMUY0r1A9IuJrQ7Tbts1wWb73/zXVXT8VhewbHGoNlBKE0qUhKMED6Tv4wDF+pmbtUJmKXDthytAvLmg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-ia32-msvc": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.15.2.tgz",
+ "integrity": "sha512-kCATEzuY2LP9AlbU2uScjcVhgnCAkRdu62vbce17Ro5kxEHxYWcugkveyBRS3AqZGtwAKYbMAuNloer9LS/hpw==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-x64-msvc": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.15.2.tgz",
+ "integrity": "sha512-iJaHeYCF4jTn7OEKSa3KRiuVFIVYts8jYjNmCdyz1u5g8HRyTDISD76r8+ljEOgm36oviRQvcXaw6LFp1m0yyA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/counter": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
+ "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
+ "dev": true,
+ "license": "Apache-2.0"
+ },
+ "node_modules/@swc/jest": {
+ "version": "0.2.39",
+ "resolved": "https://registry.npmjs.org/@swc/jest/-/jest-0.2.39.tgz",
+ "integrity": "sha512-eyokjOwYd0Q8RnMHri+8/FS1HIrIUKK/sRrFp8c1dThUOfNeCWbLmBP1P5VsKdvmkd25JaH+OKYwEYiAYg9YAA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/create-cache-key-function": "^30.0.0",
+ "@swc/counter": "^0.1.3",
+ "jsonc-parser": "^3.2.0"
+ },
+ "engines": {
+ "npm": ">= 7.0.0"
+ },
+ "peerDependencies": {
+ "@swc/core": "*"
+ }
+ },
+ "node_modules/@swc/types": {
+ "version": "0.1.25",
+ "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.25.tgz",
+ "integrity": "sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@swc/counter": "^0.1.3"
+ }
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/graceful-fs": {
+ "version": "4.1.9",
+ "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz",
+ "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/istanbul-lib-coverage": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz",
+ "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/istanbul-lib-report": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz",
+ "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/istanbul-lib-coverage": "*"
+ }
+ },
+ "node_modules/@types/istanbul-reports": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz",
+ "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/istanbul-lib-report": "*"
+ }
+ },
+ "node_modules/@types/jest": {
+ "version": "29.5.14",
+ "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz",
+ "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "expect": "^29.0.0",
+ "pretty-format": "^29.0.0"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "20.19.25",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.25.tgz",
+ "integrity": "sha512-ZsJzA5thDQMSQO788d7IocwwQbI8B5OPzmqNvpf3NY/+MHDAS759Wo0gd2WQeXYt5AAAQjzcrTVC6SKCuYgoCQ==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~6.21.0"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.13",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
+ "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.4"
+ }
+ },
+ "node_modules/@types/stack-utils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz",
+ "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/yargs": {
+ "version": "17.0.35",
+ "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz",
+ "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/yargs-parser": "*"
+ }
+ },
+ "node_modules/@types/yargs-parser": {
+ "version": "21.0.3",
+ "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz",
+ "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "type-fest": "^0.21.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/babel-jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz",
+ "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/transform": "^29.7.0",
+ "@types/babel__core": "^7.1.14",
+ "babel-plugin-istanbul": "^6.1.1",
+ "babel-preset-jest": "^29.6.3",
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.8.0"
+ }
+ },
+ "node_modules/babel-plugin-istanbul": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz",
+ "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@istanbuljs/load-nyc-config": "^1.0.0",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-instrument": "^5.0.4",
+ "test-exclude": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz",
+ "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@babel/core": "^7.12.3",
+ "@babel/parser": "^7.14.7",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-coverage": "^3.2.0",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/babel-plugin-jest-hoist": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz",
+ "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.3.3",
+ "@babel/types": "^7.3.3",
+ "@types/babel__core": "^7.1.14",
+ "@types/babel__traverse": "^7.0.6"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/babel-preset-current-node-syntax": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz",
+ "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-bigint": "^7.8.3",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5",
+ "@babel/plugin-syntax-import-attributes": "^7.24.7",
+ "@babel/plugin-syntax-import-meta": "^7.10.4",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5",
+ "@babel/plugin-syntax-top-level-await": "^7.14.5"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0 || ^8.0.0-0"
+ }
+ },
+ "node_modules/babel-preset-jest": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz",
+ "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "babel-plugin-jest-hoist": "^29.6.3",
+ "babel-preset-current-node-syntax": "^1.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.8.29",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.29.tgz",
+ "integrity": "sha512-sXdt2elaVnhpDNRDz+1BDx1JQoJRuNk7oVlAlbGiFkLikHCAQiccexF/9e91zVi6RCgqspl04aP+6Cnl9zRLrA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.0",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz",
+ "integrity": "sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "baseline-browser-mapping": "^2.8.25",
+ "caniuse-lite": "^1.0.30001754",
+ "electron-to-chromium": "^1.5.249",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.1.4"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/bs-logger": {
+ "version": "0.2.6",
+ "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz",
+ "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fast-json-stable-stringify": "2.x"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/bser": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz",
+ "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "node-int64": "^0.4.0"
+ }
+ },
+ "node_modules/buffer-from": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001755",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001755.tgz",
+ "integrity": "sha512-44V+Jm6ctPj7R52Na4TLi3Zri4dWUljJd+RDm+j8LtNCc/ihLCT+X1TzoOAkRETEWqjuLnh9581Tl80FvK7jVA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/char-regex": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz",
+ "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ci-info": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
+ "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/sibiraj-s"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cjs-module-lexer": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz",
+ "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/co": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+ "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "iojs": ">= 1.0.0",
+ "node": ">= 0.12.0"
+ }
+ },
+ "node_modules/collect-v8-coverage": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz",
+ "integrity": "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/create-jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz",
+ "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "exit": "^0.1.2",
+ "graceful-fs": "^4.2.9",
+ "jest-config": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "prompts": "^2.0.1"
+ },
+ "bin": {
+ "create-jest": "bin/create-jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/create-jest/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/create-jest/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/create-jest/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/dedent": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz",
+ "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==",
+ "dev": true,
+ "license": "MIT",
+ "peerDependencies": {
+ "babel-plugin-macros": "^3.1.0"
+ },
+ "peerDependenciesMeta": {
+ "babel-plugin-macros": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/deepmerge": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
+ "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/detect-newline": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz",
+ "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/diff-sequences": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
+ "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.255",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.255.tgz",
+ "integrity": "sha512-Z9oIp4HrFF/cZkDPMpz2XSuVpc1THDpT4dlmATFlJUIBVCy9Vap5/rIXsASP1CscBacBqhabwh8vLctqBwEerQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/emittery": {
+ "version": "0.13.1",
+ "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz",
+ "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/emittery?sponsor=1"
+ }
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/error-ex": {
+ "version": "1.3.4",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz",
+ "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
+ "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/exit": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
+ "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/expect": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz",
+ "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/expect-utils": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/fb-watchman": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz",
+ "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "bser": "2.1.1"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-package-type": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
+ "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/handlebars": {
+ "version": "4.7.8",
+ "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
+ "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "minimist": "^1.2.5",
+ "neo-async": "^2.6.2",
+ "source-map": "^0.6.1",
+ "wordwrap": "^1.0.0"
+ },
+ "bin": {
+ "handlebars": "bin/handlebars"
+ },
+ "engines": {
+ "node": ">=0.4.7"
+ },
+ "optionalDependencies": {
+ "uglify-js": "^3.1.4"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/html-escaper": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
+ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=10.17.0"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/import-local": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz",
+ "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "pkg-dir": "^4.2.0",
+ "resolve-cwd": "^3.0.0"
+ },
+ "bin": {
+ "import-local-fixture": "fixtures/cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/is-core-module": {
+ "version": "2.16.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
+ "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-generator-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz",
+ "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/istanbul-lib-coverage": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
+ "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/istanbul-lib-instrument": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz",
+ "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@babel/core": "^7.23.9",
+ "@babel/parser": "^7.23.9",
+ "@istanbuljs/schema": "^0.1.3",
+ "istanbul-lib-coverage": "^3.2.0",
+ "semver": "^7.5.4"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-instrument/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-report": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
+ "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "istanbul-lib-coverage": "^3.0.0",
+ "make-dir": "^4.0.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-source-maps": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz",
+ "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "debug": "^4.1.1",
+ "istanbul-lib-coverage": "^3.0.0",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-reports": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz",
+ "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "html-escaper": "^2.0.0",
+ "istanbul-lib-report": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz",
+ "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/core": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "import-local": "^3.0.2",
+ "jest-cli": "^29.7.0"
+ },
+ "bin": {
+ "jest": "bin/jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-changed-files": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz",
+ "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "execa": "^5.0.0",
+ "jest-util": "^29.7.0",
+ "p-limit": "^3.1.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz",
+ "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/expect": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "co": "^4.6.0",
+ "dedent": "^1.0.0",
+ "is-generator-fn": "^2.0.0",
+ "jest-each": "^29.7.0",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "p-limit": "^3.1.0",
+ "pretty-format": "^29.7.0",
+ "pure-rand": "^6.0.0",
+ "slash": "^3.0.0",
+ "stack-utils": "^2.0.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-cli": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz",
+ "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/core": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "create-jest": "^29.7.0",
+ "exit": "^0.1.2",
+ "import-local": "^3.0.2",
+ "jest-config": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "yargs": "^17.3.1"
+ },
+ "bin": {
+ "jest": "bin/jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-cli/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-cli/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-cli/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-config": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz",
+ "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@jest/test-sequencer": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "babel-jest": "^29.7.0",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "deepmerge": "^4.2.2",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "jest-circus": "^29.7.0",
+ "jest-environment-node": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-runner": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "parse-json": "^5.2.0",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@types/node": "*",
+ "ts-node": ">=9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "ts-node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-config/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-config/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-config/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-config/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-diff": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz",
+ "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "diff-sequences": "^29.6.3",
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-docblock": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz",
+ "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "detect-newline": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz",
+ "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "jest-get-type": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-environment-node": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz",
+ "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-node/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-node/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-node/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-get-type": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz",
+ "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz",
+ "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/graceful-fs": "^4.1.3",
+ "@types/node": "*",
+ "anymatch": "^3.0.3",
+ "fb-watchman": "^2.0.0",
+ "graceful-fs": "^4.2.9",
+ "jest-regex-util": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "walker": "^1.0.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "^2.3.2"
+ }
+ },
+ "node_modules/jest-haste-map/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-haste-map/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-leak-detector": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz",
+ "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-matcher-utils": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz",
+ "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "jest-diff": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz",
+ "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.12.13",
+ "@jest/types": "^29.6.3",
+ "@types/stack-utils": "^2.0.0",
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "micromatch": "^4.0.4",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "stack-utils": "^2.0.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-mock": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz",
+ "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-pnp-resolver": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz",
+ "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ },
+ "peerDependencies": {
+ "jest-resolve": "*"
+ },
+ "peerDependenciesMeta": {
+ "jest-resolve": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-regex-util": {
+ "version": "30.0.1",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-30.0.1.tgz",
+ "integrity": "sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/jest-resolve": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz",
+ "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-pnp-resolver": "^1.2.2",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "resolve": "^1.20.0",
+ "resolve.exports": "^2.0.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-resolve-dependencies": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz",
+ "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "jest-regex-util": "^29.6.3",
+ "jest-snapshot": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-resolve-dependencies/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz",
+ "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/environment": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "emittery": "^0.13.1",
+ "graceful-fs": "^4.2.9",
+ "jest-docblock": "^29.7.0",
+ "jest-environment-node": "^29.7.0",
+ "jest-haste-map": "^29.7.0",
+ "jest-leak-detector": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-resolve": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-watcher": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "p-limit": "^3.1.0",
+ "source-map-support": "0.5.13"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-runtime": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz",
+ "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/globals": "^29.7.0",
+ "@jest/source-map": "^29.6.3",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "cjs-module-lexer": "^1.0.0",
+ "collect-v8-coverage": "^1.0.0",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-mock": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-bom": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-runtime/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz",
+ "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@babel/generator": "^7.7.2",
+ "@babel/plugin-syntax-jsx": "^7.7.2",
+ "@babel/plugin-syntax-typescript": "^7.7.2",
+ "@babel/types": "^7.3.3",
+ "@jest/expect-utils": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "babel-preset-current-node-syntax": "^1.0.0",
+ "chalk": "^4.0.0",
+ "expect": "^29.7.0",
+ "graceful-fs": "^4.2.9",
+ "jest-diff": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "natural-compare": "^1.4.0",
+ "pretty-format": "^29.7.0",
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-snapshot/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/jest-util": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz",
+ "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "graceful-fs": "^4.2.9",
+ "picomatch": "^2.2.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-util/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-util/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-util/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-validate": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz",
+ "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.0.0",
+ "jest-get-type": "^29.6.3",
+ "leven": "^3.1.0",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-validate/node_modules/camelcase": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
+ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/jest-watcher": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz",
+ "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.0.0",
+ "emittery": "^0.13.1",
+ "jest-util": "^29.7.0",
+ "string-length": "^4.0.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-watcher/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-watcher/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-watcher/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-worker": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz",
+ "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "jest-util": "^29.7.0",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-worker/node_modules/supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/jest/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/jsonc-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.3.1.tgz",
+ "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/kleur": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
+ "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/leven": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/llama-stack-client": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/llama-stack-client/-/llama-stack-client-0.3.2.tgz",
+ "integrity": "sha512-vzcnIN6k3sp7dhMXSnyrPSd82ACH/H3snj2uF6DgZwZCacKQNp2Y5XIT5qZZgoM1EUXbaxdVYFCeWD9yNCwatw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ }
+ },
+ "node_modules/llama-stack-client/node_modules/@types/node": {
+ "version": "18.19.130",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
+ "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/llama-stack-client/node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-locate": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/lodash.memoize": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
+ "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/make-dir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
+ "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/make-dir/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/make-error": {
+ "version": "1.3.6",
+ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
+ "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/makeerror": {
+ "version": "1.0.12",
+ "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz",
+ "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "tmpl": "1.0.5"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
+ "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/neo-async": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "deprecated": "Use your platform's native DOMException instead",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/node-int64": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
+ "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mimic-fn": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-limit": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/p-locate/node_modules/p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-try": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pirates": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz",
+ "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "find-up": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pretty-format": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
+ "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "ansi-styles": "^5.0.0",
+ "react-is": "^18.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/pretty-format/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/pretty-format/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/pretty-format/node_modules/ansi-styles": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
+ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/prompts": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
+ "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "kleur": "^3.0.3",
+ "sisteransi": "^1.0.5"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/pure-rand": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz",
+ "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/dubzzz"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/fast-check"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/react-is": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
+ "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/resolve": {
+ "version": "1.22.11",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz",
+ "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-core-module": "^2.16.1",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-cwd": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz",
+ "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "resolve-from": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
+ "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/resolve.exports": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz",
+ "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/sisteransi": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
+ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-support": {
+ "version": "0.5.13",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz",
+ "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
+ "dev": true,
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/stack-utils": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz",
+ "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "escape-string-regexp": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/string-length": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
+ "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "char-regex": "^1.0.2",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz",
+ "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/test-exclude": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
+ "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "@istanbuljs/schema": "^0.1.2",
+ "glob": "^7.1.4",
+ "minimatch": "^3.0.4"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/tmpl": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
+ "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
+ "dev": true,
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/ts-jest": {
+ "version": "29.4.5",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.5.tgz",
+ "integrity": "sha512-HO3GyiWn2qvTQA4kTgjDcXiMwYQt68a1Y8+JuLRVpdIzm+UOLSHgl/XqR4c6nzJkq5rOkjc02O2I7P7l/Yof0Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "bs-logger": "^0.2.6",
+ "fast-json-stable-stringify": "^2.1.0",
+ "handlebars": "^4.7.8",
+ "json5": "^2.2.3",
+ "lodash.memoize": "^4.1.2",
+ "make-error": "^1.3.6",
+ "semver": "^7.7.3",
+ "type-fest": "^4.41.0",
+ "yargs-parser": "^21.1.1"
+ },
+ "bin": {
+ "ts-jest": "cli.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": ">=7.0.0-beta.0 <8",
+ "@jest/transform": "^29.0.0 || ^30.0.0",
+ "@jest/types": "^29.0.0 || ^30.0.0",
+ "babel-jest": "^29.0.0 || ^30.0.0",
+ "jest": "^29.0.0 || ^30.0.0",
+ "jest-util": "^29.0.0 || ^30.0.0",
+ "typescript": ">=4.3 <6"
+ },
+ "peerDependenciesMeta": {
+ "@babel/core": {
+ "optional": true
+ },
+ "@jest/transform": {
+ "optional": true
+ },
+ "@jest/types": {
+ "optional": true
+ },
+ "babel-jest": {
+ "optional": true
+ },
+ "esbuild": {
+ "optional": true
+ },
+ "jest-util": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/ts-jest/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ts-jest/node_modules/type-fest": {
+ "version": "4.41.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz",
+ "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==",
+ "dev": true,
+ "license": "(MIT OR CC0-1.0)",
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/type-detect": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "dev": true,
+ "license": "(MIT OR CC0-1.0)",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.9.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
+ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/uglify-js": {
+ "version": "3.19.3",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz",
+ "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "optional": true,
+ "bin": {
+ "uglifyjs": "bin/uglifyjs"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "6.21.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
+ "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
+ "license": "MIT"
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz",
+ "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/v8-to-istanbul": {
+ "version": "9.3.0",
+ "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz",
+ "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.12",
+ "@types/istanbul-lib-coverage": "^2.0.1",
+ "convert-source-map": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10.12.0"
+ }
+ },
+ "node_modules/walker": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz",
+ "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "makeerror": "1.0.12"
+ }
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wordwrap": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+ "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/write-file-atomic": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz",
+ "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "imurmurhash": "^0.1.4",
+ "signal-exit": "^3.0.7"
+ },
+ "engines": {
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ }
+ }
+}
diff --git a/tests/integration/client-typescript/package.json b/tests/integration/client-typescript/package.json
new file mode 100644
index 000000000..e5fe1b8f5
--- /dev/null
+++ b/tests/integration/client-typescript/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "llama-stack-typescript-integration-tests",
+ "version": "0.0.1",
+ "private": true,
+ "description": "TypeScript client integration tests for Llama Stack",
+ "scripts": {
+ "test": "node run-tests.js"
+ },
+ "devDependencies": {
+ "@swc/core": "^1.3.102",
+ "@swc/jest": "^0.2.29",
+ "@types/jest": "^29.4.0",
+ "@types/node": "^20.0.0",
+ "jest": "^29.4.0",
+ "ts-jest": "^29.1.0",
+ "typescript": "^5.0.0"
+ }
+}
diff --git a/tests/integration/client-typescript/run-tests.js b/tests/integration/client-typescript/run-tests.js
new file mode 100755
index 000000000..93df5d8a0
--- /dev/null
+++ b/tests/integration/client-typescript/run-tests.js
@@ -0,0 +1,63 @@
+#!/usr/bin/env node
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Test runner that finds and executes TypeScript tests based on suite/setup mapping.
+ * Called by integration-tests.sh via npm test.
+ */
+
+const fs = require('fs');
+const path = require('path');
+const { execSync } = require('child_process');
+
+const suite = process.env.LLAMA_STACK_TEST_SUITE;
+const setup = process.env.LLAMA_STACK_TEST_SETUP || '';
+
+if (!suite) {
+ console.error('Error: LLAMA_STACK_TEST_SUITE environment variable is required');
+ process.exit(1);
+}
+
+// Read suites.json to find matching test files
+const suitesPath = path.join(__dirname, 'suites.json');
+if (!fs.existsSync(suitesPath)) {
+ console.log(`No TypeScript tests configured (${suitesPath} not found)`);
+ process.exit(0);
+}
+
+const suites = JSON.parse(fs.readFileSync(suitesPath, 'utf-8'));
+
+// Find matching entry
+let testFiles = [];
+for (const entry of suites) {
+ if (entry.suite !== suite) {
+ continue;
+ }
+ const entrySetup = entry.setup || '';
+ if (entrySetup && entrySetup !== setup) {
+ continue;
+ }
+ testFiles = entry.files || [];
+ break;
+}
+
+if (testFiles.length === 0) {
+ console.log(`No TypeScript integration tests mapped for suite ${suite} (setup ${setup})`);
+ process.exit(0);
+}
+
+console.log(`Running TypeScript tests for suite ${suite} (setup ${setup}): ${testFiles.join(', ')}`);
+
+// Run Jest with the mapped test files
+try {
+  execSync(`npx jest --config jest.integration.config.js ${testFiles.map((f) => JSON.stringify(f)).join(' ')}`, {
+ stdio: 'inherit',
+ cwd: __dirname,
+ });
+} catch (error) {
+ process.exit(error.status || 1);
+}
diff --git a/tests/integration/client-typescript/setup.ts b/tests/integration/client-typescript/setup.ts
new file mode 100644
index 000000000..75cabab74
--- /dev/null
+++ b/tests/integration/client-typescript/setup.ts
@@ -0,0 +1,162 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Global setup for integration tests.
+ * This file mimics pytest's fixture system by providing shared test configuration.
+ */
+
+import LlamaStackClient from 'llama-stack-client';
+
+/**
+ * Load test configuration from the Python setup system.
+ * This reads setup definitions from tests/integration/suites.py via get_setup_env.py.
+ */
+function loadTestConfig() {
+ const baseURL = process.env['TEST_API_BASE_URL'];
+ const setupName = process.env['LLAMA_STACK_TEST_SETUP'];
+ const textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
+ const embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
+
+ if (!baseURL) {
+ throw new Error(
+ 'TEST_API_BASE_URL is required for integration tests. ' +
+ 'Run tests using: ./scripts/integration-test.sh',
+ );
+ }
+
+ return {
+ baseURL,
+ textModel,
+ embeddingModel,
+ setupName,
+ };
+}
+
+// Read configuration from environment variables (set by scripts/integration-test.sh)
+export const TEST_CONFIG = loadTestConfig();
+
+// Validate required configuration
+beforeAll(() => {
+ console.log('\n=== Integration Test Configuration ===');
+ console.log(`Base URL: ${TEST_CONFIG.baseURL}`);
+ console.log(`Setup: ${TEST_CONFIG.setupName || 'NOT SET'}`);
+ console.log(
+ `Text Model: ${TEST_CONFIG.textModel || 'NOT SET - tests requiring text model will be skipped'}`,
+ );
+ console.log(
+ `Embedding Model: ${
+ TEST_CONFIG.embeddingModel || 'NOT SET - tests requiring embedding model will be skipped'
+ }`,
+ );
+ console.log('=====================================\n');
+});
+
+/**
+ * Create a client instance for integration tests.
+ * Mimics pytest's `llama_stack_client` fixture.
+ *
+ * @param testId - Test ID to send in X-LlamaStack-Provider-Data header for replay mode.
+ * Format: "tests/integration/responses/test_basic_responses.py::test_name[params]"
+ */
+export function createTestClient(testId?: string): LlamaStackClient {
+  const headers: Record<string, string> = {};
+
+ // In server mode with replay, send test ID for recording isolation
+ if (process.env['LLAMA_STACK_TEST_STACK_CONFIG_TYPE'] === 'server' && testId) {
+ headers['X-LlamaStack-Provider-Data'] = JSON.stringify({
+ __test_id: testId,
+ });
+ }
+
+ return new LlamaStackClient({
+ baseURL: TEST_CONFIG.baseURL,
+ timeout: 60000, // 60 seconds
+ defaultHeaders: headers,
+ });
+}
+
+/**
+ * Skip test if required model is not configured.
+ * Mimics pytest's `skip_if_no_model` autouse fixture.
+ */
+export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
+ const model = modelType === 'text' ? TEST_CONFIG.textModel : TEST_CONFIG.embeddingModel;
+
+ if (!model) {
+ const envVar = modelType === 'text' ? 'LLAMA_STACK_TEST_TEXT_MODEL' : 'LLAMA_STACK_TEST_EMBEDDING_MODEL';
+    console.log(`Skipping: ${modelType} model not configured (set ${envVar})`);
+ return test.skip.bind(test) as typeof test;
+ }
+
+ return test;
+}
+
+/**
+ * Get the configured text model, throwing if not set.
+ * Use this in tests that absolutely require a text model.
+ */
+export function requireTextModel(): string {
+ if (!TEST_CONFIG.textModel) {
+ throw new Error(
+ 'LLAMA_STACK_TEST_TEXT_MODEL environment variable is required. ' +
+ 'Run tests using: ./scripts/integration-test.sh',
+ );
+ }
+ return TEST_CONFIG.textModel;
+}
+
+/**
+ * Get the configured embedding model, throwing if not set.
+ * Use this in tests that absolutely require an embedding model.
+ */
+export function requireEmbeddingModel(): string {
+ if (!TEST_CONFIG.embeddingModel) {
+ throw new Error(
+ 'LLAMA_STACK_TEST_EMBEDDING_MODEL environment variable is required. ' +
+ 'Run tests using: ./scripts/integration-test.sh',
+ );
+ }
+ return TEST_CONFIG.embeddingModel;
+}
+
+/**
+ * Extracts aggregated text output from a ResponseObject.
+ * This concatenates all text content from the response's output array.
+ *
+ * Copied from llama-stack-client's response-helpers until it's available in published version.
+ */
+export function getResponseOutputText(response: any): string {
+ const pieces: string[] = [];
+
+ for (const output of response.output ?? []) {
+ if (!output || output.type !== 'message') {
+ continue;
+ }
+
+ const content = output.content;
+ if (typeof content === 'string') {
+ pieces.push(content);
+ continue;
+ }
+
+ if (!Array.isArray(content)) {
+ continue;
+ }
+
+ for (const item of content) {
+ if (typeof item === 'string') {
+ pieces.push(item);
+ continue;
+ }
+ if (item && item.type === 'output_text' && 'text' in item && typeof item.text === 'string') {
+ pieces.push(item.text);
+ }
+ }
+ }
+
+ return pieces.join('');
+}
diff --git a/tests/integration/client-typescript/suites.json b/tests/integration/client-typescript/suites.json
new file mode 100644
index 000000000..5c5b83058
--- /dev/null
+++ b/tests/integration/client-typescript/suites.json
@@ -0,0 +1,12 @@
+[
+ {
+ "suite": "responses",
+ "setup": "gpt",
+ "files": ["__tests__/responses.test.ts"]
+ },
+ {
+ "suite": "base",
+ "setup": "ollama",
+ "files": ["__tests__/inference.test.ts"]
+ }
+]
diff --git a/tests/integration/client-typescript/tsconfig.json b/tests/integration/client-typescript/tsconfig.json
new file mode 100644
index 000000000..19b6cdeb1
--- /dev/null
+++ b/tests/integration/client-typescript/tsconfig.json
@@ -0,0 +1,16 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "ES2022",
+ "lib": ["ES2022"],
+ "moduleResolution": "bundler",
+ "esModuleInterop": true,
+ "allowSyntheticDefaultImports": true,
+ "strict": true,
+ "skipLibCheck": true,
+ "resolveJsonModule": true,
+ "types": ["jest", "node"]
+ },
+ "include": ["**/*.ts"],
+ "exclude": ["node_modules"]
+}
diff --git a/tests/integration/files/test_files.py b/tests/integration/files/test_files.py
index 1f19c88c5..e8004c95d 100644
--- a/tests/integration/files/test_files.py
+++ b/tests/integration/files/test_files.py
@@ -175,7 +175,7 @@ def test_expires_after_requests(openai_client):
@pytest.mark.xfail(message="User isolation broken for current providers, must be fixed.")
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
def test_files_authentication_isolation(mock_get_authenticated_user, llama_stack_client):
"""Test that users can only access their own files."""
from llama_stack_client import NotFoundError
@@ -275,7 +275,7 @@ def test_files_authentication_isolation(mock_get_authenticated_user, llama_stack
raise e
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
def test_files_authentication_shared_attributes(
mock_get_authenticated_user, llama_stack_client, provider_type_is_openai
):
@@ -335,7 +335,7 @@ def test_files_authentication_shared_attributes(
raise e
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
def test_files_authentication_anonymous_access(
mock_get_authenticated_user, llama_stack_client, provider_type_is_openai
):
diff --git a/tests/integration/inference/test_tools_with_schemas.py b/tests/integration/inference/test_tools_with_schemas.py
index 5b6e69ae3..ab033c381 100644
--- a/tests/integration/inference/test_tools_with_schemas.py
+++ b/tests/integration/inference/test_tools_with_schemas.py
@@ -9,8 +9,6 @@ Integration tests for inference/chat completion with JSON Schema-based tools.
Tests that tools pass through correctly to various LLM providers.
"""
-import json
-
import pytest
from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -193,22 +191,11 @@ class TestMCPToolsInChatCompletion:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
# Get the tools from MCP
tools_response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Convert to OpenAI format for inference
diff --git a/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py b/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py
index ad9115756..4f4f4a8dd 100644
--- a/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py
+++ b/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py
@@ -13,14 +13,14 @@ import pytest
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.datatypes import User
from llama_stack.core.storage.datatypes import SqlStoreReference
-from llama_stack.providers.utils.sqlstore.api import ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import (
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import (
PostgresSqlStoreConfig,
SqliteSqlStoreConfig,
register_sqlstore_backends,
sqlstore_impl,
)
+from llama_stack_api.internal.sqlstore import ColumnType
def get_postgres_config():
@@ -96,7 +96,7 @@ async def cleanup_records(sql_store, table_name, record_ids):
@pytest.mark.parametrize("backend_config", BACKEND_CONFIGS)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_store_attributes(mock_get_authenticated_user, authorized_store, request):
"""Test that JSON column comparisons work correctly for both PostgreSQL and SQLite"""
backend_name = request.node.callspec.id
@@ -190,7 +190,7 @@ async def test_authorized_store_attributes(mock_get_authenticated_user, authoriz
@pytest.mark.parametrize("backend_config", BACKEND_CONFIGS)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_user_ownership_policy(mock_get_authenticated_user, authorized_store, request):
"""Test that 'user is owner' policies work correctly with record ownership"""
from llama_stack.core.access_control.datatypes import AccessRule, Action, Scope
diff --git a/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json b/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json
new file mode 100644
index 000000000..4418331b0
--- /dev/null
+++ b/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json
@@ -0,0 +1,773 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1V9w3bXnppL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YEsj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"ex",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "perim",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Q"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ent_na",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "me\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "U"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": " \"boi",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ling_p",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "oint",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ha"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "d5D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "function": {
+ "arguments": "",
+ "name": "get_user_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0LbsjDcKz6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"us",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "c"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ernam",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "e\": \"c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "7C0WFn181I3y3l"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "harl",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "wf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ie\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "r"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "FAci"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 51,
+ "prompt_tokens": 393,
+ "total_tokens": 444,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "6xgpRRdKjviPT"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json b/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json
new file mode 100644
index 000000000..3bec72d95
--- /dev/null
+++ b/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json
@@ -0,0 +1,593 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_function_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Can you tell me the weather in Paris and the current time?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather information for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_time",
+ "description": "Get current time for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "QmTXstGvpa8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_HJMoLtHXfCzhlMQOfqIKt0n3",
+ "function": {
+ "arguments": "",
+ "name": "get_weather"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "iFjmkK23KL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "L"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "THa6gWbrWhVmZ6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "eL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "jng"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_vGKvTKZM7aALMaUw3Jas7lRg",
+ "function": {
+ "arguments": "",
+ "name": "get_time"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "LSailgMcgSl54"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "0engr6vRvqXTEP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "Pe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "LU9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "kD7d"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": {
+ "completion_tokens": 44,
+ "prompt_tokens": 110,
+ "total_tokens": 154,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "R4ICoxqTqj7ZY"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json b/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json
new file mode 100644
index 000000000..ee32a4396
--- /dev/null
+++ b/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json
@@ -0,0 +1,773 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "N5OTLR9CfmU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "3EKK"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"ex",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "R"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "perim",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Q"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ent_na",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "me\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": " \"boi",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ling_p",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "oint",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Gfk"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "function": {
+ "arguments": "",
+ "name": "get_user_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Yp7IueDs5V"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"us",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ernam",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "X"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "e\": \"c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "2oif8BwVnTCnAF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "harl",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "hv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ie\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "C"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ctjO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 51,
+ "prompt_tokens": 393,
+ "total_tokens": 444,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "fclbZeBSSKN4C"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json b/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json
new file mode 100644
index 000000000..2f5d2364f
--- /dev/null
+++ b/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json
@@ -0,0 +1,1099 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YYi7jfwMArDwjF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "02OX5OI6tENcr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4WNc0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "tKtJ1sl5pfaDr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Hvj1aWM1rpv8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9E9CvQfqolGi9S"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "j4WB9GjVD9jcfN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "TTDWSqM29LF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AjjxQybBbe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1gVblRiURtILOET"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0R3NJvfpXy2dP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "A7ulc3isZRh1Wy"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "FPq6iOQwJS1aQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Kc20HZgwXltY5rS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "2FCOJr6gSDviM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "`,",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "zcC44JB9JLv8DJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " and",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YkHz4dmGI8Ip"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "WU1FWVwHa8kT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "F89Whppjswq"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "WSOnxHfHCWTqS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "xdc4FO9TTNKE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "815WDeN0y91Hke"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "xp6WP0YmWjNZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "apUUpE3jkpxjm"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "TfCA46aEfur7ddv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4q5btS7EmyGo4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "a5UVTkIvEXtjbH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "UGU1lPYHNno0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4axBUdqWraTmuNf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "111",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ZtMOpwGI78JEH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "11",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "LqPjHcx2BmtLO1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "l5q2xqEWQx4dA4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "sM6qZWT3Vp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 32,
+ "prompt_tokens": 465,
+ "total_tokens": 497,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "Nr5ToBPpxyZu4"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json b/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json
new file mode 100644
index 000000000..3c9321759
--- /dev/null
+++ b/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json
@@ -0,0 +1,1099 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "wwHFAiwvH4WszR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9715Kiw8g6FeU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "f3RUP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "uTou0sZw0Trqr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "O3FUhiRX4t3O"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8Row2VeWyXlavX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "R6KU5Aed2Y4hdt"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "aXOqmJlIAIp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AEyQ67P1E9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pxs1ElabWHWYTsE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "f4fvZlQAsoFLb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "XIUUCRzVlWEjdW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "x2dM9CVkT0ICQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Ls8dfHOXPeHjdGE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "RF1hpcOB964EM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "`,",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "QnLWon1Lh1bPrb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " and",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0OHZT5bnbdwa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "jtbU7bWjfj72"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "nCopvKj1JIE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "2ZDuFZoCixweF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "u3QmR0zYiExg"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "z6tGgyH3Gw667d"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HalCDTgB5QRV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "5UJBpMTsZMjVF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "p8zU7xEpcUR63Lh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "t0fKxlCyUxaFU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lRSEHqi9mVmVZJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8C6DeNABBjpJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "L4qXmW7bonqcf97"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "111",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "zje3cRhC3fzKb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "11",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "NgeVi1nYcUbkmN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "d83dlilKTeA1RE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HnPRpNWz4n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 32,
+ "prompt_tokens": 465,
+ "total_tokens": 497,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "sfrloH58kmZpA"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json b/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json
new file mode 100644
index 000000000..821bd20c4
--- /dev/null
+++ b/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json
@@ -0,0 +1,1634 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "content": "Tool call skipped: maximum tool calls limit (1) reached."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9zm2knPUrQf9Ti"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "dBZWt7n0cY28K"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "gBkUe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "DK27AidkjJEUs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "BvRS3fe55saU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Q30TpKRJ8sqbaj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "uZIcYxencsPVq7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "OTlywqpO2gu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1D39HJt78o"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "z9q3XLiA1zUj69i"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YilL3DwdzhGNE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "yLvB3LVIF9yqTB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "aQ2ZgA6wBrzgb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0jzpzruxw3CNxO3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Wl5Eu8yWUoj2V"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "F3a7FpN1N5MOoL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " However",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "oC3Sc1Oj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": ",",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "dR3KxirqoL6RMvN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " I",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HDIUF9MxNvDNC8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " wasn't",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "jvYMbj7Jb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " able",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "wA25F90roLY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1kP6AeTeGmGNU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " get",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8zixGSMc9fiH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "UCSCTgIKkLiT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1hHm53qitSi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "N3NBeCvE43ZRW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ul7bMYRpL04n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ABgwNSe6WHqE9N"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "6q5tAeJOMEC8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "gxcccAWJYWckn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "qpqi3k54AaZDnNH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " due",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "OB5oYuchm2uE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "MKHpNGKsdWpLO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " a",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "zYt4J00NPy69fJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " tool",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Z0kM0bozww8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " call",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "qbQA28Mr3PO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " limit",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ZzevZnpsYj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "QBno7Vj0QhMrSjO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " Please",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "hEj0RemlE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " let",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "xN8xRqzcxXCR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " me",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0LxJ9leKvCunj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " know",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "KoHcgiBEVc6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " if",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "eT2hCjpvISlxh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " you",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9LJdcoWEzgMP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " would",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "bxChZ0IYYP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " like",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "oU5UBQRKEpI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " me",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HQHzzykuhNV7v"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YJ86yXpqctfF5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " attempt",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ToTM0n5O"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " that",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "SateSvqBggb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " again",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "APRnnp4Qce"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Xe9yNJcVnFP4PZl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ZH7NR5wSoI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 52,
+ "prompt_tokens": 474,
+ "total_tokens": 526,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "2P0uXrABC0X8d"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json b/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json
new file mode 100644
index 000000000..450d84176
--- /dev/null
+++ b/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json
@@ -0,0 +1,593 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_function_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Can you tell me the weather in Paris and the current time?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather information for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_time",
+ "description": "Get current time for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "iUduPiCYBRb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_Wv3G8aEQOJLNXGRaK3hAWzq3",
+ "function": {
+ "arguments": "",
+ "name": "get_weather"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "cqZKgzm65y"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "L"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "zbBLzavvnEdLz0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "Gj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "LQo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_8xkOmOgJpV77n5W2dSx6ytW6",
+ "function": {
+ "arguments": "",
+ "name": "get_time"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "eltoncGlxI8Go"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "S"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "N"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "2bTn1MaAXYFoVK"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "VF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "BHi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "WaYG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": {
+ "completion_tokens": 44,
+ "prompt_tokens": 110,
+ "total_tokens": 154,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "aevj6ZWLqfCK6"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json b/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json
new file mode 100644
index 000000000..089242af3
--- /dev/null
+++ b/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json
@@ -0,0 +1,1661 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "content": "Tool call skipped: maximum tool calls limit (1) reached."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "uoj10MYhhjCsjQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "RbrwfJ20BVqRi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "88xHU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lXhzWF230RZCL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "McIrBR2XVfyS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "7SiItrYff13YKr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pf232bD4VeXdXc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "z0kyzhP7ioh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "3TUkmyiT28"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "kFAkj6BHwM6YKZQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "fiRWSM9LNpP4J"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "VRPBkgW9PrA6C7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YqSi9vVuexh3e"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "y64suQvx1Nfp8Pj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "kouF1KXaF3fSv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Ju1xHmwme71tPA"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " However",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "TZuAhRJ8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": ",",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ikVKxLAdOhUPHHa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " I",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pntThOzs2GzlYs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " couldn't",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "v4ihoTx"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " retrieve",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "476NjPo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AFDAUQw3ezkM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ztweLiyDuwu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "q575s9DLRlXDL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "oEoKwHu8H1FD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "KOgPjHTbZYg83A"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "PmTsVhsBBtRV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "hkXsP7qhxNrQ0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "C9RtrovVHvrH33B"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " at",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "fhJHhlmbEWrnY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " this",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pvYlADlLGnc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " time",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "N787ynNkyIU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " due",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lkX5gCjexTSI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ecopEBh7Ckmai"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " a",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Nf1X9c8Z4TduoA"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " tool",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "MtnVKdm0UnR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " call",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ExJ8aBPckoF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " limitation",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "jE7bT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AaaLnYdPLucETYH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " Please",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "cPsBAfFXF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " let",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "nGUo5AX3lQpP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " me",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "shpHT1JYFdHrS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " know",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "RG8m7peAEPl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " if",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "i4q8OeCvU08qi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " there's",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lXBbPXWn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " anything",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "EyZRgWl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " else",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "h87NDUy4I75"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " I",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1CJqPAnvuBVEXV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " can",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9Ava6GiwMlu5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " assist",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "fl9TQoNlV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " you",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4PwMuL1TPPvZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " with",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "XeIvTn2s7ap"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "!",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "U93F4p2ENgwWFKN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "3P0Kp8n8xH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 53,
+ "prompt_tokens": 474,
+ "total_tokens": 527,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "zjt0xUw7Sz8p9"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/test_tool_responses.py b/tests/integration/responses/test_tool_responses.py
index 742d45f8b..49bcd050b 100644
--- a/tests/integration/responses/test_tool_responses.py
+++ b/tests/integration/responses/test_tool_responses.py
@@ -600,3 +600,155 @@ def test_response_streaming_multi_turn_tool_execution(responses_client, text_mod
assert expected_output.lower() in final_response.output_text.lower(), (
f"Expected '{expected_output}' to appear in response: {final_response.output_text}"
)
+
+
+def test_max_tool_calls_with_function_tools(responses_client, text_model_id):
+ """Test handling of max_tool_calls with function tools in responses."""
+
+ max_tool_calls = 1
+ tools = [
+ {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather information for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')",
+ },
+ },
+ },
+ },
+ {
+ "type": "function",
+ "name": "get_time",
+ "description": "Get current time for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')",
+ },
+ },
+ },
+ },
+ ]
+
+ response = responses_client.responses.create(
+ model=text_model_id,
+ input="Can you tell me the weather in Paris and the current time?",
+ tools=tools,
+ stream=False,
+ max_tool_calls=max_tool_calls,
+ )
+
+ # Verify we got two function calls and that the max_tool_calls does not affect function tools
+ assert len(response.output) == 2
+ assert response.output[0].type == "function_call"
+ assert response.output[0].name == "get_weather"
+ assert response.output[0].status == "completed"
+ assert response.output[1].type == "function_call"
+ assert response.output[1].name == "get_time"
+ assert response.output[1].status == "completed"
+
+ # Verify we have a valid max_tool_calls field
+ assert response.max_tool_calls == max_tool_calls
+
+
+def test_max_tool_calls_invalid(responses_client, text_model_id):
+ """Test handling of invalid max_tool_calls in responses."""
+
+ input = "Search for today's top technology news."
+ invalid_max_tool_calls = 0
+ tools = [
+ {"type": "web_search"},
+ ]
+
+ # Create a response with an invalid max_tool_calls value i.e. 0
+ # Handle ValueError from LLS and BadRequestError from OpenAI client
+ with pytest.raises((ValueError, llama_stack_client.BadRequestError, openai.BadRequestError)) as excinfo:
+ responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ max_tool_calls=invalid_max_tool_calls,
+ )
+
+ error_message = str(excinfo.value)
+ assert f"Invalid max_tool_calls={invalid_max_tool_calls}; should be >= 1" in error_message, (
+ f"Expected error message about invalid max_tool_calls, got: {error_message}"
+ )
+
+
+def test_max_tool_calls_with_mcp_tools(responses_client, text_model_id):
+ """Test handling of max_tool_calls with mcp tools in responses."""
+
+ with make_mcp_server(tools=dependency_tools()) as mcp_server_info:
+ input = "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ max_tool_calls = [1, 5]
+ tools = [
+ {"type": "mcp", "server_label": "localmcp", "server_url": mcp_server_info["server_url"]},
+ ]
+
+ # First create a response that triggers mcp tools without max_tool_calls
+ response = responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ )
+
+ # Verify we got two mcp tool calls followed by a message
+ assert len(response.output) == 4
+ mcp_list_tools = [output for output in response.output if output.type == "mcp_list_tools"]
+ mcp_calls = [output for output in response.output if output.type == "mcp_call"]
+ message_outputs = [output for output in response.output if output.type == "message"]
+ assert len(mcp_list_tools) == 1
+ assert len(mcp_calls) == 2, f"Expected two mcp calls, got {len(mcp_calls)}"
+ assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}"
+
+ # Next create a response that triggers mcp tools with max_tool_calls set to 1
+ response_2 = responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ max_tool_calls=max_tool_calls[0],
+ )
+
+ # Verify we got one mcp tool call followed by a message
+ assert len(response_2.output) == 3
+ mcp_list_tools = [output for output in response_2.output if output.type == "mcp_list_tools"]
+ mcp_calls = [output for output in response_2.output if output.type == "mcp_call"]
+ message_outputs = [output for output in response_2.output if output.type == "message"]
+ assert len(mcp_list_tools) == 1
+ assert len(mcp_calls) == 1, f"Expected one mcp call, got {len(mcp_calls)}"
+ assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}"
+
+ # Verify we have a valid max_tool_calls field
+ assert response_2.max_tool_calls == max_tool_calls[0]
+
+ # Finally create a response that triggers mcp tools with max_tool_calls set to 5
+ response_3 = responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ max_tool_calls=max_tool_calls[1],
+ )
+
+ # Verify we got two mcp tool calls followed by a message
+ assert len(response_3.output) == 4
+ mcp_list_tools = [output for output in response_3.output if output.type == "mcp_list_tools"]
+ mcp_calls = [output for output in response_3.output if output.type == "mcp_call"]
+ message_outputs = [output for output in response_3.output if output.type == "message"]
+ assert len(mcp_list_tools) == 1
+ assert len(mcp_calls) == 2, f"Expected two mcp calls, got {len(mcp_calls)}"
+ assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}"
+
+ # Verify we have a valid max_tool_calls field
+ assert response_3.max_tool_calls == max_tool_calls[1]
diff --git a/tests/integration/suites.py b/tests/integration/suites.py
index 7689657b4..10c872705 100644
--- a/tests/integration/suites.py
+++ b/tests/integration/suites.py
@@ -50,7 +50,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
name="ollama",
description="Local Ollama provider with text + safety models",
env={
- "OLLAMA_URL": "http://0.0.0.0:11434",
+ "OLLAMA_URL": "http://0.0.0.0:11434/v1",
"SAFETY_MODEL": "ollama/llama-guard3:1b",
},
defaults={
@@ -64,7 +64,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
name="ollama",
description="Local Ollama provider with a vision model",
env={
- "OLLAMA_URL": "http://0.0.0.0:11434",
+ "OLLAMA_URL": "http://0.0.0.0:11434/v1",
},
defaults={
"vision_model": "ollama/llama3.2-vision:11b",
@@ -75,7 +75,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
name="ollama-postgres",
description="Server-mode tests with Postgres-backed persistence",
env={
- "OLLAMA_URL": "http://0.0.0.0:11434",
+ "OLLAMA_URL": "http://0.0.0.0:11434/v1",
"SAFETY_MODEL": "ollama/llama-guard3:1b",
"POSTGRES_HOST": "127.0.0.1",
"POSTGRES_PORT": "5432",
diff --git a/tests/integration/tool_runtime/test_mcp.py b/tests/integration/tool_runtime/test_mcp.py
index 1b7f509d2..074a92afb 100644
--- a/tests/integration/tool_runtime/test_mcp.py
+++ b/tests/integration/tool_runtime/test_mcp.py
@@ -4,8 +4,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import json
-
import pytest
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.turn_events import StepCompleted, StepProgress, ToolCallIssuedDelta
@@ -37,32 +35,20 @@ def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- with pytest.raises(Exception, match="Unauthorized"):
- llama_stack_client.tools.list(toolgroup_id=test_toolgroup_id)
-
- tools_list = llama_stack_client.tools.list(
- toolgroup_id=test_toolgroup_id,
- extra_headers=auth_headers, # Use old header-based approach
+ # Use the dedicated authorization parameter (no more provider_data headers)
+ # This tests direct tool_runtime.invoke_tool API calls
+ tools_list = llama_stack_client.tool_runtime.list_tools(
+ tool_group_id=test_toolgroup_id,
+ authorization=AUTH_TOKEN, # Use dedicated authorization parameter
)
assert len(tools_list) == 2
assert {t.name for t in tools_list} == {"greet_everyone", "get_boiling_point"}
+ # Invoke tool with authorization parameter
response = llama_stack_client.tool_runtime.invoke_tool(
tool_name="greet_everyone",
kwargs=dict(url="https://www.google.com"),
- extra_headers=auth_headers, # Use old header-based approach
+ authorization=AUTH_TOKEN, # Use dedicated authorization parameter
)
content = response.content
assert len(content) == 1
diff --git a/tests/integration/tool_runtime/test_mcp_json_schema.py b/tests/integration/tool_runtime/test_mcp_json_schema.py
index 719588c7f..6be71caaf 100644
--- a/tests/integration/tool_runtime/test_mcp_json_schema.py
+++ b/tests/integration/tool_runtime/test_mcp_json_schema.py
@@ -8,8 +8,6 @@
Tests $ref, $defs, and other JSON Schema features through MCP integration.
"""
-import json
-
import pytest
from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -122,22 +120,11 @@ class TestMCPSchemaPreservation:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
# List runtime tools
response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
tools = response
@@ -173,22 +160,11 @@ class TestMCPSchemaPreservation:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
# List tools
response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Find book_flight tool (which should have $ref/$defs)
@@ -230,21 +206,10 @@ class TestMCPSchemaPreservation:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Find get_weather tool
@@ -284,22 +249,10 @@ class TestMCPToolInvocation:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- # List tools to populate the tool index
+ # Use the dedicated authorization parameter
llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Invoke tool with complex nested data
@@ -311,7 +264,7 @@ class TestMCPToolInvocation:
"shipping": {"address": {"street": "123 Main St", "city": "San Francisco", "zipcode": "94102"}},
}
},
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Should succeed without schema validation errors
@@ -337,29 +290,17 @@ class TestMCPToolInvocation:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- # List tools to populate the tool index
+ # Use the dedicated authorization parameter
llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Test with email format
result_email = llama_stack_client.tool_runtime.invoke_tool(
tool_name="flexible_contact",
kwargs={"contact_info": "user@example.com"},
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
assert result_email.error_message is None
@@ -368,7 +309,7 @@ class TestMCPToolInvocation:
result_phone = llama_stack_client.tool_runtime.invoke_tool(
tool_name="flexible_contact",
kwargs={"contact_info": "+15551234567"},
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
assert result_phone.error_message is None
@@ -400,21 +341,10 @@ class TestAgentWithMCPTools:
mcp_endpoint=dict(uri=uri),
)
- # Use old header-based approach for Phase 1 (backward compatibility)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- tools_list = llama_stack_client.tools.list(
- toolgroup_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ # Use the dedicated authorization parameter
+ tools_list = llama_stack_client.tool_runtime.list_tools(
+ tool_group_id=test_toolgroup_id,
+ authorization=AUTH_TOKEN,
)
tool_defs = [
{
diff --git a/tests/unit/conversations/test_conversations.py b/tests/unit/conversations/test_conversations.py
index 95c54d379..3f9df5fc0 100644
--- a/tests/unit/conversations/test_conversations.py
+++ b/tests/unit/conversations/test_conversations.py
@@ -23,7 +23,7 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage
@@ -38,6 +38,9 @@ async def service():
},
stores=ServerStoresConfig(
conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
+ metadata=None,
+ inference=None,
+ prompts=None,
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
@@ -142,6 +145,9 @@ async def test_policy_configuration():
},
stores=ServerStoresConfig(
conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
+ metadata=None,
+ inference=None,
+ prompts=None,
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
diff --git a/tests/unit/core/test_stack_validation.py b/tests/unit/core/test_stack_validation.py
index 462a25c8b..5f75bc522 100644
--- a/tests/unit/core/test_stack_validation.py
+++ b/tests/unit/core/test_stack_validation.py
@@ -10,8 +10,9 @@ from unittest.mock import AsyncMock
import pytest
-from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig
+from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, VectorStoresConfig
from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config
+from llama_stack.core.storage.datatypes import ServerStoresConfig, StorageConfig
from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
@@ -21,7 +22,15 @@ class TestVectorStoresValidation:
run_config = StackRunConfig(
image_name="test",
providers={},
- storage=StorageConfig(backends={}, stores={}),
+ storage=StorageConfig(
+ backends={},
+ stores=ServerStoresConfig(
+ metadata=None,
+ inference=None,
+ conversations=None,
+ prompts=None,
+ ),
+ ),
vector_stores=VectorStoresConfig(
default_provider_id="faiss",
default_embedding_model=QualifiedModel(
@@ -41,7 +50,15 @@ class TestVectorStoresValidation:
run_config = StackRunConfig(
image_name="test",
providers={},
- storage=StorageConfig(backends={}, stores={}),
+ storage=StorageConfig(
+ backends={},
+ stores=ServerStoresConfig(
+ metadata=None,
+ inference=None,
+ conversations=None,
+ prompts=None,
+ ),
+ ),
vector_stores=VectorStoresConfig(
default_provider_id="faiss",
default_embedding_model=QualifiedModel(
diff --git a/tests/unit/files/test_files.py b/tests/unit/files/test_files.py
index 793f4edd3..197038349 100644
--- a/tests/unit/files/test_files.py
+++ b/tests/unit/files/test_files.py
@@ -9,11 +9,11 @@ import pytest
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.files.localfs import (
LocalfsFilesImpl,
LocalfsFilesImplConfig,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError
diff --git a/tests/unit/fixtures.py b/tests/unit/fixtures.py
index 443a1d371..9e049f8da 100644
--- a/tests/unit/fixtures.py
+++ b/tests/unit/fixtures.py
@@ -6,9 +6,9 @@
import pytest
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.sqlite import SqliteKVStoreImpl
from llama_stack.core.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.sqlite import SqliteKVStoreImpl
@pytest.fixture(scope="function")
diff --git a/tests/unit/prompts/prompts/conftest.py b/tests/unit/prompts/prompts/conftest.py
index c876f2041..8bfc1f03c 100644
--- a/tests/unit/prompts/prompts/conftest.py
+++ b/tests/unit/prompts/prompts/conftest.py
@@ -18,7 +18,7 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
@pytest.fixture
diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses.py b/tests/unit/providers/agents/meta_reference/test_openai_responses.py
index 78f0d7cfd..97bccbfe4 100644
--- a/tests/unit/providers/agents/meta_reference/test_openai_responses.py
+++ b/tests/unit/providers/agents/meta_reference/test_openai_responses.py
@@ -17,6 +17,7 @@ from openai.types.chat.chat_completion_chunk import (
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
OpenAIResponsesImpl,
)
@@ -24,7 +25,13 @@ from llama_stack.providers.utils.responses.responses_store import (
ResponsesStore,
_OpenAIResponseObjectWithInputAndMessages,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import (
+ OpenAIChatCompletionContentPartImageParam,
+ OpenAIFile,
+ OpenAIFileObject,
+ OpenAISystemMessageParam,
+ Prompt,
+)
from llama_stack_api.agents import Order
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
@@ -38,6 +45,8 @@ from llama_stack_api.inference import (
)
from llama_stack_api.openai_responses import (
ListOpenAIResponseInputItem,
+ OpenAIResponseInputMessageContentFile,
+ OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputToolFunction,
OpenAIResponseInputToolMCP,
@@ -47,6 +56,7 @@ from llama_stack_api.openai_responses import (
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageWebSearchToolCall,
+ OpenAIResponsePrompt,
OpenAIResponseText,
OpenAIResponseTextFormat,
WebSearchToolTypes,
@@ -98,6 +108,19 @@ def mock_safety_api():
return safety_api
+@pytest.fixture
+def mock_prompts_api():
+ prompts_api = AsyncMock()
+ return prompts_api
+
+
+@pytest.fixture
+def mock_files_api():
+ """Mock files API for testing."""
+ files_api = AsyncMock()
+ return files_api
+
+
@pytest.fixture
def openai_responses_impl(
mock_inference_api,
@@ -107,6 +130,8 @@ def openai_responses_impl(
mock_vector_io_api,
mock_safety_api,
mock_conversations_api,
+ mock_prompts_api,
+ mock_files_api,
):
return OpenAIResponsesImpl(
inference_api=mock_inference_api,
@@ -116,6 +141,8 @@ def openai_responses_impl(
vector_io_api=mock_vector_io_api,
safety_api=mock_safety_api,
conversations_api=mock_conversations_api,
+ prompts_api=mock_prompts_api,
+ files_api=mock_files_api,
)
@@ -499,7 +526,7 @@ async def test_create_openai_response_with_tool_call_function_arguments_none(ope
mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
-async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api):
+async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api, mock_files_api):
"""Test creating an OpenAI response with multiple messages."""
# Setup
input_messages = [
@@ -710,7 +737,7 @@ async def test_create_openai_response_with_instructions(openai_responses_impl, m
async def test_create_openai_response_with_instructions_and_multiple_messages(
- openai_responses_impl, mock_inference_api
+ openai_responses_impl, mock_inference_api, mock_files_api
):
# Setup
input_messages = [
@@ -1242,3 +1269,489 @@ async def test_create_openai_response_with_output_types_as_input(
assert stored_with_outputs.input == input_with_output_types
assert len(stored_with_outputs.input) == 3
+
+
+async def test_create_openai_response_with_prompt(openai_responses_impl, mock_inference_api, mock_prompts_api):
+ """Test creating an OpenAI response with a prompt."""
+ input_text = "What is the capital of Ireland?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="You are a helpful {{ area_name }} assistant at {{ company_name }}. Always provide accurate information.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["area_name", "company_name"],
+ is_default=True,
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "area_name": OpenAIResponseInputMessageContentText(text="geography"),
+ "company_name": OpenAIResponseInputMessageContentText(text="Dummy Company"),
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ prompt=openai_response_prompt,
+ )
+
+ mock_prompts_api.get_prompt.assert_called_with(prompt_id, 1)
+ mock_inference_api.openai_chat_completion.assert_called()
+ call_args = mock_inference_api.openai_chat_completion.call_args
+ sent_messages = call_args.args[0].messages
+ assert len(sent_messages) == 2
+
+ system_messages = [msg for msg in sent_messages if msg.role == "system"]
+ assert len(system_messages) == 1
+ assert (
+ system_messages[0].content
+ == "You are a helpful geography assistant at Dummy Company. Always provide accurate information."
+ )
+
+ user_messages = [msg for msg in sent_messages if msg.role == "user"]
+ assert len(user_messages) == 1
+ assert user_messages[0].content == input_text
+
+ assert result.model == model
+ assert result.status == "completed"
+ assert isinstance(result.prompt, OpenAIResponsePrompt)
+ assert result.prompt.id == prompt_id
+ assert result.prompt.variables == openai_response_prompt.variables
+ assert result.prompt.version == "1"
+
+
+async def test_prepend_prompt_successful_without_variables(openai_responses_impl, mock_prompts_api, mock_inference_api):
+ """Test prepend_prompt function without variables."""
+ input_text = "What is the capital of Ireland?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="You are a helpful assistant. Always provide accurate information.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=[],
+ is_default=True,
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(id=prompt_id, version="1")
+
+ mock_prompts_api.get_prompt.return_value = prompt
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ prompt=openai_response_prompt,
+ )
+
+ mock_prompts_api.get_prompt.assert_called_with(prompt_id, 1)
+ mock_inference_api.openai_chat_completion.assert_called()
+ call_args = mock_inference_api.openai_chat_completion.call_args
+ sent_messages = call_args.args[0].messages
+ assert len(sent_messages) == 2
+ system_messages = [msg for msg in sent_messages if msg.role == "system"]
+ assert system_messages[0].content == "You are a helpful assistant. Always provide accurate information."
+
+
+async def test_prepend_prompt_invalid_variable(openai_responses_impl, mock_prompts_api):
+ """Test error handling in prepend_prompt function when prompt parameters contain invalid variables."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="You are a {{ role }} assistant.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["role"], # Only "role" is valid
+ is_default=True,
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "role": OpenAIResponseInputMessageContentText(text="helpful"),
+ "company": OpenAIResponseInputMessageContentText(
+ text="Dummy Company"
+ ), # company is not in prompt.variables
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="Test prompt")]
+
+ # Execute - should raise ValueError for invalid variable
+ with pytest.raises(ValueError, match="Variable company not found in prompt"):
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ # Verify
+ mock_prompts_api.get_prompt.assert_called_once_with(prompt_id, 1)
+
+
+async def test_prepend_prompt_not_found(openai_responses_impl, mock_prompts_api):
+ """Test prepend_prompt function when prompt is not found."""
+ prompt_id = "pmpt_nonexistent"
+ openai_response_prompt = OpenAIResponsePrompt(id=prompt_id, version="1")
+
+ mock_prompts_api.get_prompt.return_value = None # Prompt not found
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="Test prompt")]
+ initial_length = len(messages)
+
+ # Execute
+ result = await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ # Verify
+ mock_prompts_api.get_prompt.assert_called_once_with(prompt_id, 1)
+
+ # Should return None when prompt not found
+ assert result is None
+
+ # Messages should not be modified
+ assert len(messages) == initial_length
+ assert messages[0].content == "Test prompt"
+
+
+async def test_prepend_prompt_variable_substitution(openai_responses_impl, mock_prompts_api):
+ """Test complex variable substitution with multiple occurrences and special characters in prepend_prompt function."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+
+ # Support all whitespace variations: {{name}}, {{ name }}, {{ name}}, {{name }}, etc.
+ prompt = Prompt(
+ prompt="Hello {{name}}! You are working at {{ company}}. Your role is {{role}} at {{company}}. Remember, {{ name }}, to be {{ tone }}.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["name", "company", "role", "tone"],
+ is_default=True,
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "name": OpenAIResponseInputMessageContentText(text="Alice"),
+ "company": OpenAIResponseInputMessageContentText(text="Dummy Company"),
+ "role": OpenAIResponseInputMessageContentText(text="AI Assistant"),
+ "tone": OpenAIResponseInputMessageContentText(text="professional"),
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="Test")]
+
+ # Execute
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ # Verify
+ assert len(messages) == 2
+ assert isinstance(messages[0], OpenAISystemMessageParam)
+ expected_content = "Hello Alice! You are working at Dummy Company. Your role is AI Assistant at Dummy Company. Remember, Alice, to be professional."
+ assert messages[0].content == expected_content
+
+
+async def test_prepend_prompt_with_image_variable(openai_responses_impl, mock_prompts_api, mock_files_api):
+ """Test prepend_prompt with image variable - should create placeholder in system message and append image as separate user message."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="Analyze this {{product_image}} and describe what you see.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["product_image"],
+ is_default=True,
+ )
+
+ # Mock file content and file metadata
+ mock_file_content = b"fake_image_data"
+ mock_files_api.openai_retrieve_file_content.return_value = type("obj", (object,), {"body": mock_file_content})()
+ mock_files_api.openai_retrieve_file.return_value = OpenAIFileObject(
+ object="file",
+ id="file-abc123",
+ bytes=len(mock_file_content),
+ created_at=1234567890,
+ expires_at=1234567890,
+ filename="product.jpg",
+ purpose="assistants",
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "product_image": OpenAIResponseInputMessageContentImage(
+ file_id="file-abc123",
+ detail="high",
+ )
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="What do you think?")]
+
+ # Execute
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ assert len(messages) == 3
+
+ # Check system message has placeholder
+ assert isinstance(messages[0], OpenAISystemMessageParam)
+ assert messages[0].content == "Analyze this [Image: product_image] and describe what you see."
+
+ # Check original user message is still there
+ assert isinstance(messages[1], OpenAIUserMessageParam)
+ assert messages[1].content == "What do you think?"
+
+ # Check new user message with image is appended
+ assert isinstance(messages[2], OpenAIUserMessageParam)
+ assert isinstance(messages[2].content, list)
+ assert len(messages[2].content) == 1
+
+ # Should be image with data URL
+ assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam)
+ assert messages[2].content[0].image_url.url.startswith("data:image/")
+ assert messages[2].content[0].image_url.detail == "high"
+
+
+async def test_prepend_prompt_with_file_variable(openai_responses_impl, mock_prompts_api, mock_files_api):
+ """Test prepend_prompt with file variable - should create placeholder in system message and append file as separate user message."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="Review the document {{contract_file}} and summarize key points.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["contract_file"],
+ is_default=True,
+ )
+
+ # Mock file retrieval
+ mock_file_content = b"fake_pdf_content"
+ mock_files_api.openai_retrieve_file_content.return_value = type("obj", (object,), {"body": mock_file_content})()
+ mock_files_api.openai_retrieve_file.return_value = OpenAIFileObject(
+ object="file",
+ id="file-contract-789",
+ bytes=len(mock_file_content),
+ created_at=1234567890,
+ expires_at=1234567890,
+ filename="contract.pdf",
+ purpose="assistants",
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "contract_file": OpenAIResponseInputMessageContentFile(
+ file_id="file-contract-789",
+ filename="contract.pdf",
+ )
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="Please review this.")]
+
+ # Execute
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ assert len(messages) == 3
+
+ # Check system message has placeholder
+ assert isinstance(messages[0], OpenAISystemMessageParam)
+ assert messages[0].content == "Review the document [File: contract_file] and summarize key points."
+
+ # Check original user message is still there
+ assert isinstance(messages[1], OpenAIUserMessageParam)
+ assert messages[1].content == "Please review this."
+
+ # Check new user message with file is appended
+ assert isinstance(messages[2], OpenAIUserMessageParam)
+ assert isinstance(messages[2].content, list)
+ assert len(messages[2].content) == 1
+
+    # Sole content part should be file with data URL
+ assert isinstance(messages[2].content[0], OpenAIFile)
+ assert messages[2].content[0].file.file_data.startswith("data:application/pdf;base64,")
+ assert messages[2].content[0].file.filename == "contract.pdf"
+ assert messages[2].content[0].file.file_id is None
+
+
+async def test_prepend_prompt_with_mixed_variables(openai_responses_impl, mock_prompts_api, mock_files_api):
+ """Test prepend_prompt with text, image, and file variables mixed together."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="Hello {{name}}! Analyze {{photo}} and review {{document}}. Provide insights for {{company}}.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["name", "photo", "document", "company"],
+ is_default=True,
+ )
+
+ # Mock file retrieval for image and file
+ mock_image_content = b"fake_image_data"
+ mock_file_content = b"fake_doc_content"
+
+ async def mock_retrieve_file_content(file_id):
+ if file_id == "file-photo-123":
+ return type("obj", (object,), {"body": mock_image_content})()
+ elif file_id == "file-doc-456":
+ return type("obj", (object,), {"body": mock_file_content})()
+
+ mock_files_api.openai_retrieve_file_content.side_effect = mock_retrieve_file_content
+
+ def mock_retrieve_file(file_id):
+ if file_id == "file-photo-123":
+ return OpenAIFileObject(
+ object="file",
+ id="file-photo-123",
+ bytes=len(mock_image_content),
+ created_at=1234567890,
+ expires_at=1234567890,
+ filename="photo.jpg",
+ purpose="assistants",
+ )
+ elif file_id == "file-doc-456":
+ return OpenAIFileObject(
+ object="file",
+ id="file-doc-456",
+ bytes=len(mock_file_content),
+ created_at=1234567890,
+ expires_at=1234567890,
+ filename="doc.pdf",
+ purpose="assistants",
+ )
+
+ mock_files_api.openai_retrieve_file.side_effect = mock_retrieve_file
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "name": OpenAIResponseInputMessageContentText(text="Alice"),
+ "photo": OpenAIResponseInputMessageContentImage(file_id="file-photo-123", detail="auto"),
+ "document": OpenAIResponseInputMessageContentFile(file_id="file-doc-456", filename="doc.pdf"),
+ "company": OpenAIResponseInputMessageContentText(text="Acme Corp"),
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="Here's my question.")]
+
+ # Execute
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ assert len(messages) == 3
+
+ # Check system message has text and placeholders
+ assert isinstance(messages[0], OpenAISystemMessageParam)
+ expected_system = "Hello Alice! Analyze [Image: photo] and review [File: document]. Provide insights for Acme Corp."
+ assert messages[0].content == expected_system
+
+ # Check original user message is still there
+ assert isinstance(messages[1], OpenAIUserMessageParam)
+ assert messages[1].content == "Here's my question."
+
+ # Check new user message with media is appended (2 media items)
+ assert isinstance(messages[2], OpenAIUserMessageParam)
+ assert isinstance(messages[2].content, list)
+ assert len(messages[2].content) == 2
+
+ # First part should be image with data URL
+ assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam)
+ assert messages[2].content[0].image_url.url.startswith("data:image/")
+
+ # Second part should be file with data URL
+ assert isinstance(messages[2].content[1], OpenAIFile)
+ assert messages[2].content[1].file.file_data.startswith("data:application/pdf;base64,")
+ assert messages[2].content[1].file.filename == "doc.pdf"
+ assert messages[2].content[1].file.file_id is None
+
+
+async def test_prepend_prompt_with_image_using_image_url(openai_responses_impl, mock_prompts_api):
+ """Test prepend_prompt with image variable using image_url instead of file_id."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="Describe {{screenshot}}.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["screenshot"],
+ is_default=True,
+ )
+
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={
+ "screenshot": OpenAIResponseInputMessageContentImage(
+ image_url="https://example.com/screenshot.png",
+ detail="low",
+ )
+ },
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+
+ # Initial messages
+ messages = [OpenAIUserMessageParam(content="What is this?")]
+
+ # Execute
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+ assert len(messages) == 3
+
+ # Check system message has placeholder
+ assert isinstance(messages[0], OpenAISystemMessageParam)
+ assert messages[0].content == "Describe [Image: screenshot]."
+
+ # Check original user message is still there
+ assert isinstance(messages[1], OpenAIUserMessageParam)
+ assert messages[1].content == "What is this?"
+
+ # Check new user message with image is appended
+ assert isinstance(messages[2], OpenAIUserMessageParam)
+ assert isinstance(messages[2].content, list)
+
+ # Image should use the provided URL
+ assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam)
+ assert messages[2].content[0].image_url.url == "https://example.com/screenshot.png"
+ assert messages[2].content[0].image_url.detail == "low"
+
+
+async def test_prepend_prompt_image_variable_missing_required_fields(openai_responses_impl, mock_prompts_api):
+ """Test prepend_prompt with image variable that has neither file_id nor image_url - should raise error."""
+ prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+ prompt = Prompt(
+ prompt="Analyze {{bad_image}}.",
+ prompt_id=prompt_id,
+ version=1,
+ variables=["bad_image"],
+ is_default=True,
+ )
+
+ # Create image content with neither file_id nor image_url
+ openai_response_prompt = OpenAIResponsePrompt(
+ id=prompt_id,
+ version="1",
+ variables={"bad_image": OpenAIResponseInputMessageContentImage()}, # No file_id or image_url
+ )
+
+ mock_prompts_api.get_prompt.return_value = prompt
+ messages = [OpenAIUserMessageParam(content="Test")]
+
+ # Execute - should raise ValueError
+ with pytest.raises(ValueError, match="Image content must have either 'image_url' or 'file_id'"):
+ await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py
index fa1ddae78..5a3e6bf21 100644
--- a/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py
+++ b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py
@@ -39,6 +39,8 @@ def responses_impl_with_conversations(
mock_vector_io_api,
mock_conversations_api,
mock_safety_api,
+ mock_prompts_api,
+ mock_files_api,
):
"""Create OpenAIResponsesImpl instance with conversations API."""
return OpenAIResponsesImpl(
@@ -49,6 +51,8 @@ def responses_impl_with_conversations(
vector_io_api=mock_vector_io_api,
conversations_api=mock_conversations_api,
safety_api=mock_safety_api,
+ prompts_api=mock_prompts_api,
+ files_api=mock_files_api,
)
diff --git a/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py b/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py
index b7a437686..e496a96e3 100644
--- a/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py
+++ b/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py
@@ -5,6 +5,8 @@
# the root directory of this source tree.
+from unittest.mock import AsyncMock
+
import pytest
from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
@@ -46,6 +48,12 @@ from llama_stack_api.openai_responses import (
)
+@pytest.fixture
+def mock_files_api():
+ """Mock files API for testing."""
+ return AsyncMock()
+
+
class TestConvertChatChoiceToResponseMessage:
async def test_convert_string_content(self):
choice = OpenAIChoice(
@@ -78,17 +86,17 @@ class TestConvertChatChoiceToResponseMessage:
class TestConvertResponseContentToChatContent:
- async def test_convert_string_content(self):
- result = await convert_response_content_to_chat_content("Simple string")
+ async def test_convert_string_content(self, mock_files_api):
+ result = await convert_response_content_to_chat_content("Simple string", mock_files_api)
assert result == "Simple string"
- async def test_convert_text_content_parts(self):
+ async def test_convert_text_content_parts(self, mock_files_api):
content = [
OpenAIResponseInputMessageContentText(text="First part"),
OpenAIResponseOutputMessageContentOutputText(text="Second part"),
]
- result = await convert_response_content_to_chat_content(content)
+ result = await convert_response_content_to_chat_content(content, mock_files_api)
assert len(result) == 2
assert isinstance(result[0], OpenAIChatCompletionContentPartTextParam)
@@ -96,10 +104,10 @@ class TestConvertResponseContentToChatContent:
assert isinstance(result[1], OpenAIChatCompletionContentPartTextParam)
assert result[1].text == "Second part"
- async def test_convert_image_content(self):
+ async def test_convert_image_content(self, mock_files_api):
content = [OpenAIResponseInputMessageContentImage(image_url="https://example.com/image.jpg", detail="high")]
- result = await convert_response_content_to_chat_content(content)
+ result = await convert_response_content_to_chat_content(content, mock_files_api)
assert len(result) == 1
assert isinstance(result[0], OpenAIChatCompletionContentPartImageParam)
diff --git a/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py b/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py
index d4d1b872a..a914bbef4 100644
--- a/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py
+++ b/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py
@@ -30,6 +30,8 @@ def mock_apis():
"vector_io_api": AsyncMock(),
"conversations_api": AsyncMock(),
"safety_api": AsyncMock(),
+ "prompts_api": AsyncMock(),
+ "files_api": AsyncMock(),
}
diff --git a/tests/unit/providers/agents/meta_reference/test_safety_optional.py b/tests/unit/providers/agents/meta_reference/test_safety_optional.py
new file mode 100644
index 000000000..c2311b68f
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_safety_optional.py
@@ -0,0 +1,214 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""Tests for making Safety API optional in meta-reference agents provider.
+
+This test suite validates the changes introduced to fix issue #4165, which
+allows running the meta-reference agents provider without the Safety API.
+Safety API is now an optional dependency, and errors are raised at request time
+when guardrails are explicitly requested without Safety API configured.
+"""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from llama_stack.core.datatypes import Api
+from llama_stack.core.storage.datatypes import KVStoreReference, ResponsesStoreReference
+from llama_stack.providers.inline.agents.meta_reference import get_provider_impl
+from llama_stack.providers.inline.agents.meta_reference.config import (
+ AgentPersistenceConfig,
+ MetaReferenceAgentsImplConfig,
+)
+from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
+ run_guardrails,
+)
+
+
+@pytest.fixture
+def mock_persistence_config():
+ """Create a mock persistence configuration."""
+ return AgentPersistenceConfig(
+ agent_state=KVStoreReference(
+ backend="kv_default",
+ namespace="agents",
+ ),
+ responses=ResponsesStoreReference(
+ backend="sql_default",
+ table_name="responses",
+ ),
+ )
+
+
+@pytest.fixture
+def mock_deps():
+ """Create mock dependencies for the agents provider."""
+ # Create mock APIs
+ inference_api = AsyncMock()
+ vector_io_api = AsyncMock()
+ tool_runtime_api = AsyncMock()
+ tool_groups_api = AsyncMock()
+ conversations_api = AsyncMock()
+ prompts_api = AsyncMock()
+ files_api = AsyncMock()
+
+ return {
+ Api.inference: inference_api,
+ Api.vector_io: vector_io_api,
+ Api.tool_runtime: tool_runtime_api,
+ Api.tool_groups: tool_groups_api,
+ Api.conversations: conversations_api,
+ Api.prompts: prompts_api,
+ Api.files: files_api,
+ }
+
+
+class TestProviderInitialization:
+ """Test provider initialization with different safety API configurations."""
+
+ async def test_initialization_with_safety_api_present(self, mock_persistence_config, mock_deps):
+ """Test successful initialization when Safety API is configured."""
+ config = MetaReferenceAgentsImplConfig(persistence=mock_persistence_config)
+
+ # Add safety API to deps
+ safety_api = AsyncMock()
+ mock_deps[Api.safety] = safety_api
+
+ # Mock the initialize method to avoid actual initialization
+ with patch(
+ "llama_stack.providers.inline.agents.meta_reference.agents.MetaReferenceAgentsImpl.initialize",
+ new_callable=AsyncMock,
+ ):
+ # Should not raise any exception
+ provider = await get_provider_impl(config, mock_deps, policy=[], telemetry_enabled=False)
+ assert provider is not None
+
+ async def test_initialization_without_safety_api(self, mock_persistence_config, mock_deps):
+ """Test successful initialization when Safety API is not configured."""
+ config = MetaReferenceAgentsImplConfig(persistence=mock_persistence_config)
+
+ # Safety API is NOT in mock_deps - provider should still start
+ # Mock the initialize method to avoid actual initialization
+ with patch(
+ "llama_stack.providers.inline.agents.meta_reference.agents.MetaReferenceAgentsImpl.initialize",
+ new_callable=AsyncMock,
+ ):
+ # Should not raise any exception
+ provider = await get_provider_impl(config, mock_deps, policy=[], telemetry_enabled=False)
+ assert provider is not None
+ assert provider.safety_api is None
+
+
+class TestGuardrailsFunctionality:
+ """Test run_guardrails function with optional safety API."""
+
+ async def test_run_guardrails_with_none_safety_api(self):
+ """Test that run_guardrails returns None when safety_api is None."""
+ result = await run_guardrails(safety_api=None, messages="test message", guardrail_ids=["llama-guard"])
+ assert result is None
+
+ async def test_run_guardrails_with_empty_messages(self):
+ """Test that run_guardrails returns None for empty messages."""
+ # Test with None safety API
+ result = await run_guardrails(safety_api=None, messages="", guardrail_ids=["llama-guard"])
+ assert result is None
+
+ # Test with mock safety API
+ mock_safety_api = AsyncMock()
+ result = await run_guardrails(safety_api=mock_safety_api, messages="", guardrail_ids=["llama-guard"])
+ assert result is None
+
+ async def test_run_guardrails_with_none_safety_api_ignores_guardrails(self):
+ """Test that guardrails are skipped when safety_api is None, even if guardrail_ids are provided."""
+ # Should not raise exception, just return None
+ result = await run_guardrails(
+ safety_api=None,
+ messages="potentially harmful content",
+ guardrail_ids=["llama-guard", "content-filter"],
+ )
+ assert result is None
+
+ async def test_create_response_rejects_guardrails_without_safety_api(self, mock_persistence_config, mock_deps):
+ """Test that create_openai_response raises error when guardrails requested but Safety API unavailable."""
+ from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+ )
+ from llama_stack_api import ResponseGuardrailSpec
+
+ # Create OpenAIResponsesImpl with no safety API
+ with patch("llama_stack.providers.inline.agents.meta_reference.responses.openai_responses.ResponsesStore"):
+ impl = OpenAIResponsesImpl(
+ inference_api=mock_deps[Api.inference],
+ tool_groups_api=mock_deps[Api.tool_groups],
+ tool_runtime_api=mock_deps[Api.tool_runtime],
+ responses_store=MagicMock(),
+ vector_io_api=mock_deps[Api.vector_io],
+ safety_api=None, # No Safety API
+ conversations_api=mock_deps[Api.conversations],
+ prompts_api=mock_deps[Api.prompts],
+ files_api=mock_deps[Api.files],
+ )
+
+ # Test with string guardrail
+ with pytest.raises(ValueError) as exc_info:
+ await impl.create_openai_response(
+ input="test input",
+ model="test-model",
+ guardrails=["llama-guard"],
+ )
+ assert "Cannot process guardrails: Safety API is not configured" in str(exc_info.value)
+
+ # Test with ResponseGuardrailSpec
+ with pytest.raises(ValueError) as exc_info:
+ await impl.create_openai_response(
+ input="test input",
+ model="test-model",
+ guardrails=[ResponseGuardrailSpec(type="llama-guard")],
+ )
+ assert "Cannot process guardrails: Safety API is not configured" in str(exc_info.value)
+
+ async def test_create_response_succeeds_without_guardrails_and_no_safety_api(
+ self, mock_persistence_config, mock_deps
+ ):
+ """Test that create_openai_response works when no guardrails requested and Safety API unavailable."""
+ from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+ )
+
+ # Create OpenAIResponsesImpl with no safety API
+ with (
+ patch("llama_stack.providers.inline.agents.meta_reference.responses.openai_responses.ResponsesStore"),
+ patch.object(OpenAIResponsesImpl, "_create_streaming_response", new_callable=AsyncMock) as mock_stream,
+ ):
+ # Mock the streaming response to return a simple async generator
+ async def mock_generator():
+ yield MagicMock()
+
+ mock_stream.return_value = mock_generator()
+
+ impl = OpenAIResponsesImpl(
+ inference_api=mock_deps[Api.inference],
+ tool_groups_api=mock_deps[Api.tool_groups],
+ tool_runtime_api=mock_deps[Api.tool_runtime],
+ responses_store=MagicMock(),
+ vector_io_api=mock_deps[Api.vector_io],
+ safety_api=None, # No Safety API
+ conversations_api=mock_deps[Api.conversations],
+ prompts_api=mock_deps[Api.prompts],
+ files_api=mock_deps[Api.files],
+ )
+
+ # Should not raise when no guardrails requested
+ # Note: This will still fail later in execution due to mocking, but should pass the validation
+ try:
+ await impl.create_openai_response(
+ input="test input",
+ model="test-model",
+ guardrails=None, # No guardrails
+ )
+ except Exception as e:
+ # Ensure the error is NOT about missing Safety API
+ assert "Cannot process guardrails: Safety API is not configured" not in str(e)
diff --git a/tests/unit/providers/batches/conftest.py b/tests/unit/providers/batches/conftest.py
index d161bf976..8ecfa99fb 100644
--- a/tests/unit/providers/batches/conftest.py
+++ b/tests/unit/providers/batches/conftest.py
@@ -13,9 +13,9 @@ from unittest.mock import AsyncMock
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.providers.inline.batches.reference.batches import ReferenceBatchesImpl
from llama_stack.providers.inline.batches.reference.config import ReferenceBatchesImplConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
@pytest.fixture
diff --git a/tests/unit/providers/files/conftest.py b/tests/unit/providers/files/conftest.py
index c64ecc3a3..f8959b5b7 100644
--- a/tests/unit/providers/files/conftest.py
+++ b/tests/unit/providers/files/conftest.py
@@ -9,8 +9,8 @@ import pytest
from moto import mock_aws
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.remote.files.s3 import S3FilesImplConfig, get_adapter_impl
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
class MockUploadFile:
diff --git a/tests/unit/providers/files/test_s3_files_auth.py b/tests/unit/providers/files/test_s3_files_auth.py
index e113611bd..49b33fd7b 100644
--- a/tests/unit/providers/files/test_s3_files_auth.py
+++ b/tests/unit/providers/files/test_s3_files_auth.py
@@ -18,11 +18,11 @@ async def test_listing_hides_other_users_file(s3_provider, sample_text_file):
user_a = User("user-a", {"roles": ["team-a"]})
user_b = User("user-b", {"roles": ["team-b"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
listed = await s3_provider.openai_list_files()
assert all(f.id != uploaded.id for f in listed.data)
@@ -41,11 +41,11 @@ async def test_cannot_access_other_user_file(s3_provider, sample_text_file, op):
user_a = User("user-a", {"roles": ["team-a"]})
user_b = User("user-b", {"roles": ["team-b"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
with pytest.raises(ResourceNotFoundError):
await op(s3_provider, uploaded.id)
@@ -56,11 +56,11 @@ async def test_shared_role_allows_listing(s3_provider, sample_text_file):
user_a = User("user-a", {"roles": ["shared-role"]})
user_b = User("user-b", {"roles": ["shared-role"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
listed = await s3_provider.openai_list_files()
assert any(f.id == uploaded.id for f in listed.data)
@@ -79,10 +79,10 @@ async def test_shared_role_allows_access(s3_provider, sample_text_file, op):
user_x = User("user-x", {"roles": ["shared-role"]})
user_y = User("user-y", {"roles": ["shared-role"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_x
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_y
await op(s3_provider, uploaded.id)
diff --git a/tests/unit/providers/inference/test_bedrock_adapter.py b/tests/unit/providers/inference/test_bedrock_adapter.py
index a20f2860a..2a1ca769b 100644
--- a/tests/unit/providers/inference/test_bedrock_adapter.py
+++ b/tests/unit/providers/inference/test_bedrock_adapter.py
@@ -40,8 +40,8 @@ def test_api_key_from_header_overrides_config():
"""Test API key from request header overrides config via client property"""
config = BedrockConfig(api_key="config-key", region_name="us-east-1")
adapter = BedrockInferenceAdapter(config=config)
- adapter.provider_data_api_key_field = "aws_bedrock_api_key"
- adapter.get_request_provider_data = MagicMock(return_value=SimpleNamespace(aws_bedrock_api_key="header-key"))
+ adapter.provider_data_api_key_field = "aws_bearer_token_bedrock"
+ adapter.get_request_provider_data = MagicMock(return_value=SimpleNamespace(aws_bearer_token_bedrock="header-key"))
# The client property is where header override happens (in OpenAIMixin)
assert adapter.client.api_key == "header-key"
diff --git a/tests/unit/providers/inference/test_bedrock_config.py b/tests/unit/providers/inference/test_bedrock_config.py
index 4c1fd56a2..622080426 100644
--- a/tests/unit/providers/inference/test_bedrock_config.py
+++ b/tests/unit/providers/inference/test_bedrock_config.py
@@ -9,7 +9,7 @@ from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
def test_bedrock_config_defaults_no_env(monkeypatch):
"""Test BedrockConfig defaults when env vars are not set"""
- monkeypatch.delenv("AWS_BEDROCK_API_KEY", raising=False)
+ monkeypatch.delenv("AWS_BEARER_TOKEN_BEDROCK", raising=False)
monkeypatch.delenv("AWS_DEFAULT_REGION", raising=False)
config = BedrockConfig()
assert config.auth_credential is None
@@ -35,5 +35,5 @@ def test_bedrock_config_sample():
sample = BedrockConfig.sample_run_config()
assert "api_key" in sample
assert "region_name" in sample
- assert sample["api_key"] == "${env.AWS_BEDROCK_API_KEY:=}"
+ assert sample["api_key"] == "${env.AWS_BEARER_TOKEN_BEDROCK:=}"
assert sample["region_name"] == "${env.AWS_DEFAULT_REGION:=us-east-2}"
diff --git a/tests/unit/providers/inference/test_inference_client_caching.py b/tests/unit/providers/inference/test_inference_client_caching.py
index aa3a2c77a..6ddf790af 100644
--- a/tests/unit/providers/inference/test_inference_client_caching.py
+++ b/tests/unit/providers/inference/test_inference_client_caching.py
@@ -120,7 +120,7 @@ from llama_stack.providers.remote.inference.watsonx.watsonx import WatsonXInfere
VLLMInferenceAdapter,
"llama_stack.providers.remote.inference.vllm.VLLMProviderDataValidator",
{
- "url": "http://fake",
+ "base_url": "http://fake",
},
),
],
@@ -153,7 +153,7 @@ def test_litellm_provider_data_used(config_cls, adapter_cls, provider_data_valid
"""Validate data for LiteLLM-based providers. Similar to test_openai_provider_data_used, but without the
assumption that there is an OpenAI-compatible client object."""
- inference_adapter = adapter_cls(config=config_cls())
+ inference_adapter = adapter_cls(config=config_cls(base_url="http://fake"))
inference_adapter.__provider_spec__ = MagicMock()
inference_adapter.__provider_spec__.provider_data_validator = provider_data_validator
diff --git a/tests/unit/providers/inference/test_remote_vllm.py b/tests/unit/providers/inference/test_remote_vllm.py
index 958895cc4..0cf8ed306 100644
--- a/tests/unit/providers/inference/test_remote_vllm.py
+++ b/tests/unit/providers/inference/test_remote_vllm.py
@@ -40,7 +40,7 @@ from llama_stack_api import (
@pytest.fixture(scope="function")
async def vllm_inference_adapter():
- config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345")
+ config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345")
inference_adapter = VLLMInferenceAdapter(config=config)
inference_adapter.model_store = AsyncMock()
await inference_adapter.initialize()
@@ -204,7 +204,7 @@ async def test_vllm_completion_extra_body():
via extra_body to the underlying OpenAI client through the InferenceRouter.
"""
# Set up the vLLM adapter
- config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345")
+ config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345")
vllm_adapter = VLLMInferenceAdapter(config=config)
vllm_adapter.__provider_id__ = "vllm"
await vllm_adapter.initialize()
@@ -277,7 +277,7 @@ async def test_vllm_chat_completion_extra_body():
via extra_body to the underlying OpenAI client through the InferenceRouter for chat completion.
"""
# Set up the vLLM adapter
- config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345")
+ config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345")
vllm_adapter = VLLMInferenceAdapter(config=config)
vllm_adapter.__provider_id__ = "vllm"
await vllm_adapter.initialize()
diff --git a/tests/unit/providers/nvidia/test_rerank_inference.py b/tests/unit/providers/nvidia/test_rerank_inference.py
index ee62910b8..4ad9dc766 100644
--- a/tests/unit/providers/nvidia/test_rerank_inference.py
+++ b/tests/unit/providers/nvidia/test_rerank_inference.py
@@ -146,7 +146,7 @@ async def test_hosted_model_not_in_endpoint_mapping():
async def test_self_hosted_ignores_endpoint():
adapter = create_adapter(
- config=NVIDIAConfig(url="http://localhost:8000", api_key=None),
+ config=NVIDIAConfig(base_url="http://localhost:8000", api_key=None),
rerank_endpoints={"test-model": "https://model.endpoint/rerank"}, # This should be ignored for self-hosted.
)
mock_session = MockSession(MockResponse())
diff --git a/tests/unit/providers/test_configs.py b/tests/unit/providers/test_configs.py
index 867cfffbc..b4ba78394 100644
--- a/tests/unit/providers/test_configs.py
+++ b/tests/unit/providers/test_configs.py
@@ -4,8 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+from typing import get_args, get_origin
+
import pytest
-from pydantic import BaseModel
+from pydantic import BaseModel, HttpUrl
from llama_stack.core.distribution import get_provider_registry, providable_apis
from llama_stack.core.utils.dynamic import instantiate_class_type
@@ -41,3 +43,55 @@ class TestProviderConfigurations:
sample_config = config_type.sample_run_config(__distro_dir__="foobarbaz")
assert isinstance(sample_config, dict), f"{config_class_name}.sample_run_config() did not return a dict"
+
+ def test_remote_inference_url_standardization(self):
+ """Verify all remote inference providers use standardized base_url configuration."""
+ provider_registry = get_provider_registry()
+ inference_providers = provider_registry.get("inference", {})
+
+ # Filter for remote providers only
+ remote_providers = {k: v for k, v in inference_providers.items() if k.startswith("remote::")}
+
+ failures = []
+ for provider_type, provider_spec in remote_providers.items():
+ try:
+ config_class_name = provider_spec.config_class
+ config_type = instantiate_class_type(config_class_name)
+
+ # Check that config has base_url field (not url)
+ if hasattr(config_type, "model_fields"):
+ fields = config_type.model_fields
+
+ # Should NOT have 'url' field (old pattern)
+ if "url" in fields:
+ failures.append(
+ f"{provider_type}: Uses deprecated 'url' field instead of 'base_url'. "
+ f"Please rename to 'base_url' for consistency."
+ )
+
+ # Should have 'base_url' field with HttpUrl | None type
+ if "base_url" in fields:
+ field_info = fields["base_url"]
+ annotation = field_info.annotation
+
+ # Check if it's HttpUrl or HttpUrl | None
+ # get_origin() returns Union for (X | Y), None for plain types
+ # get_args() returns the types inside Union, e.g. (HttpUrl, NoneType)
+ is_valid = False
+                    if get_origin(annotation) is not None:  # Parameterized type (e.g. Union/Optional)
+ if HttpUrl in get_args(annotation):
+ is_valid = True
+ elif annotation == HttpUrl: # Plain HttpUrl without | None
+ is_valid = True
+
+ if not is_valid:
+ failures.append(
+ f"{provider_type}: base_url field has incorrect type annotation. "
+ f"Expected 'HttpUrl | None', got '{annotation}'"
+ )
+
+ except Exception as e:
+ failures.append(f"{provider_type}: Error checking URL standardization: {str(e)}")
+
+ if failures:
+ pytest.fail("URL standardization violations found:\n" + "\n".join(f" - {f}" for f in failures))
diff --git a/tests/unit/providers/utils/inference/test_openai_mixin.py b/tests/unit/providers/utils/inference/test_openai_mixin.py
index 5b13a75f4..02d44f2ba 100644
--- a/tests/unit/providers/utils/inference/test_openai_mixin.py
+++ b/tests/unit/providers/utils/inference/test_openai_mixin.py
@@ -15,7 +15,14 @@ from pydantic import BaseModel, Field
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
+from llama_stack_api import (
+ Model,
+ ModelType,
+ OpenAIChatCompletionRequestWithExtraBody,
+ OpenAICompletionRequestWithExtraBody,
+ OpenAIEmbeddingsRequestWithExtraBody,
+ OpenAIUserMessageParam,
+)
class OpenAIMixinImpl(OpenAIMixin):
@@ -834,3 +841,96 @@ class TestOpenAIMixinProviderDataApiKey:
error_message = str(exc_info.value)
assert "test_api_key" in error_message
assert "x-llamastack-provider-data" in error_message
+
+
+class TestOpenAIMixinAllowedModelsInference:
+ """Test cases for allowed_models enforcement during inference requests"""
+
+ async def test_inference_with_allowed_models(self, mixin, mock_client_context):
+ """Test that all inference methods succeed with allowed models"""
+ mixin.config.allowed_models = ["gpt-4", "text-davinci-003", "text-embedding-ada-002"]
+
+ mock_client = MagicMock()
+ mock_client.chat.completions.create = AsyncMock(return_value=MagicMock())
+ mock_client.completions.create = AsyncMock(return_value=MagicMock())
+ mock_embedding_response = MagicMock()
+ mock_embedding_response.data = [MagicMock(embedding=[0.1, 0.2, 0.3])]
+ mock_embedding_response.usage = MagicMock(prompt_tokens=5, total_tokens=5)
+ mock_client.embeddings.create = AsyncMock(return_value=mock_embedding_response)
+
+ with mock_client_context(mixin, mock_client):
+ # Test chat completion
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="gpt-4", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
+ mock_client.chat.completions.create.assert_called_once()
+
+ # Test completion
+ await mixin.openai_completion(
+ OpenAICompletionRequestWithExtraBody(model="text-davinci-003", prompt="Hello")
+ )
+ mock_client.completions.create.assert_called_once()
+
+ # Test embeddings
+ await mixin.openai_embeddings(
+ OpenAIEmbeddingsRequestWithExtraBody(model="text-embedding-ada-002", input="test text")
+ )
+ mock_client.embeddings.create.assert_called_once()
+
+ async def test_inference_with_disallowed_models(self, mixin, mock_client_context):
+ """Test that all inference methods fail with disallowed models"""
+ mixin.config.allowed_models = ["gpt-4"]
+
+ mock_client = MagicMock()
+
+ with mock_client_context(mixin, mock_client):
+ # Test chat completion with disallowed model
+ with pytest.raises(ValueError, match="Model 'gpt-4-turbo' is not in the allowed models list"):
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="gpt-4-turbo", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
+
+ # Test completion with disallowed model
+ with pytest.raises(ValueError, match="Model 'text-davinci-002' is not in the allowed models list"):
+ await mixin.openai_completion(
+ OpenAICompletionRequestWithExtraBody(model="text-davinci-002", prompt="Hello")
+ )
+
+ # Test embeddings with disallowed model
+ with pytest.raises(ValueError, match="Model 'text-embedding-3-large' is not in the allowed models list"):
+ await mixin.openai_embeddings(
+ OpenAIEmbeddingsRequestWithExtraBody(model="text-embedding-3-large", input="test text")
+ )
+
+ mock_client.chat.completions.create.assert_not_called()
+ mock_client.completions.create.assert_not_called()
+ mock_client.embeddings.create.assert_not_called()
+
+ async def test_inference_with_no_restrictions(self, mixin, mock_client_context):
+ """Test that inference succeeds when allowed_models is None, and that an empty list blocks all models"""
+ # Test with None (no restrictions)
+ assert mixin.config.allowed_models is None
+ mock_client = MagicMock()
+ mock_client.chat.completions.create = AsyncMock(return_value=MagicMock())
+
+ with mock_client_context(mixin, mock_client):
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="any-model", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
+ mock_client.chat.completions.create.assert_called_once()
+
+ # Test with empty list (blocks all models)
+ mixin.config.allowed_models = []
+ with mock_client_context(mixin, mock_client):
+ with pytest.raises(ValueError, match="Model 'gpt-4' is not in the allowed models list"):
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="gpt-4", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
diff --git a/tests/unit/providers/vector_io/conftest.py b/tests/unit/providers/vector_io/conftest.py
index 6408e25ab..b4ea77c0a 100644
--- a/tests/unit/providers/vector_io/conftest.py
+++ b/tests/unit/providers/vector_io/conftest.py
@@ -11,13 +11,13 @@ import numpy as np
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import register_kvstore_backends
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter
from llama_stack.providers.inline.vector_io.sqlite_vec import SQLiteVectorIOConfig
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteVecIndex, SQLiteVecVectorIOAdapter
from llama_stack.providers.remote.vector_io.pgvector.config import PGVectorVectorIOConfig
from llama_stack.providers.remote.vector_io.pgvector.pgvector import PGVectorIndex, PGVectorVectorIOAdapter
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore
EMBEDDING_DIMENSION = 768
@@ -279,7 +279,7 @@ async def pgvector_vec_adapter(unique_kvstore_config, mock_inference_api, embedd
) as mock_check_version:
mock_check_version.return_value = "0.5.1"
- with patch("llama_stack.providers.utils.kvstore.kvstore_impl") as mock_kvstore_impl:
+ with patch("llama_stack.core.storage.kvstore.kvstore_impl") as mock_kvstore_impl:
mock_kvstore = AsyncMock()
mock_kvstore_impl.return_value = mock_kvstore
diff --git a/tests/unit/providers/vector_io/test_vector_utils.py b/tests/unit/providers/vector_io/test_vector_utils.py
index 7f6b4af79..3e6b2971f 100644
--- a/tests/unit/providers/vector_io/test_vector_utils.py
+++ b/tests/unit/providers/vector_io/test_vector_utils.py
@@ -5,7 +5,7 @@
# the root directory of this source tree.
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
-from llama_stack_api import Chunk, ChunkMetadata
+from llama_stack_api import Chunk, ChunkMetadata, VectorStoreFileObject
# This test is a unit test for the chunk_utils.py helpers. This should only contain
# tests which are specific to this file. More general (API-level) tests should be placed in
@@ -78,3 +78,77 @@ def test_chunk_serialization():
serialized_chunk = chunk.model_dump()
assert serialized_chunk["chunk_id"] == "test-chunk-id"
assert "chunk_id" in serialized_chunk
+
+
+def test_vector_store_file_object_attributes_validation():
+ """Test VectorStoreFileObject validates and sanitizes attributes at input boundary."""
+ # Test with metadata containing lists, nested dicts, and primitives
+ from llama_stack_api.vector_io import VectorStoreChunkingStrategyAuto
+
+ file_obj = VectorStoreFileObject(
+ id="file-123",
+ attributes={
+ "tags": ["transformers", "h100-compatible", "region:us"], # List -> string
+ "model_name": "granite-3.3-8b", # String preserved
+ "score": 0.95, # Float preserved
+ "active": True, # Bool preserved
+ "count": 42, # Int -> float
+ "nested": {"key": "value"}, # Dict filtered out
+ },
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+
+ # Lists converted to comma-separated strings
+ assert file_obj.attributes["tags"] == "transformers, h100-compatible, region:us"
+ # Primitives preserved
+ assert file_obj.attributes["model_name"] == "granite-3.3-8b"
+ assert file_obj.attributes["score"] == 0.95
+ assert file_obj.attributes["active"] is True
+ assert file_obj.attributes["count"] == 42.0 # int -> float
+ # Complex types filtered out
+ assert "nested" not in file_obj.attributes
+
+
+def test_vector_store_file_object_attributes_constraints():
+ """Test VectorStoreFileObject enforces OpenAPI constraints on attributes."""
+ from llama_stack_api.vector_io import VectorStoreChunkingStrategyAuto
+
+ # Test max 16 properties
+ many_attrs = {f"key{i}": f"value{i}" for i in range(20)}
+ file_obj = VectorStoreFileObject(
+ id="file-123",
+ attributes=many_attrs,
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+ assert len(file_obj.attributes) == 16 # Max 16 properties
+
+ # Test that keys longer than 64 chars are filtered out
+ long_key_attrs = {"a" * 65: "value", "valid_key": "value"}
+ file_obj = VectorStoreFileObject(
+ id="file-124",
+ attributes=long_key_attrs,
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+ assert "a" * 65 not in file_obj.attributes
+ assert "valid_key" in file_obj.attributes
+
+ # Test that string values longer than 512 chars are truncated to 512
+ long_value_attrs = {"key": "x" * 600}
+ file_obj = VectorStoreFileObject(
+ id="file-125",
+ attributes=long_value_attrs,
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+ assert len(file_obj.attributes["key"]) == 512
diff --git a/tests/unit/registry/test_registry.py b/tests/unit/registry/test_registry.py
index 1b5032782..2b32de833 100644
--- a/tests/unit/registry/test_registry.py
+++ b/tests/unit/registry/test_registry.py
@@ -9,12 +9,12 @@ import pytest
from llama_stack.core.datatypes import VectorStoreWithOwner
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.core.store.registry import (
KEY_FORMAT,
CachedDiskDistributionRegistry,
DiskDistributionRegistry,
)
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack_api import Model, VectorStore
diff --git a/tests/unit/server/test_quota.py b/tests/unit/server/test_quota.py
index 0939414dd..cd8c38eed 100644
--- a/tests/unit/server/test_quota.py
+++ b/tests/unit/server/test_quota.py
@@ -15,7 +15,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
from llama_stack.core.datatypes import QuotaConfig, QuotaPeriod
from llama_stack.core.server.quota import QuotaMiddleware
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
@pytest.fixture
diff --git a/tests/unit/server/test_resolver.py b/tests/unit/server/test_resolver.py
index 8f8a61ea7..a1b03f630 100644
--- a/tests/unit/server/test_resolver.py
+++ b/tests/unit/server/test_resolver.py
@@ -24,8 +24,8 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec
diff --git a/tests/unit/utils/inference/test_inference_store.py b/tests/unit/utils/inference/test_inference_store.py
index bdcc529ce..22d4ec1e5 100644
--- a/tests/unit/utils/inference/test_inference_store.py
+++ b/tests/unit/utils/inference/test_inference_store.py
@@ -9,8 +9,8 @@ import time
import pytest
from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.inference.inference_store import InferenceStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
diff --git a/tests/unit/utils/kvstore/test_sqlite_memory.py b/tests/unit/utils/kvstore/test_sqlite_memory.py
index a31377306..1aaf57b44 100644
--- a/tests/unit/utils/kvstore/test_sqlite_memory.py
+++ b/tests/unit/utils/kvstore/test_sqlite_memory.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.sqlite.sqlite import SqliteKVStoreImpl
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.sqlite.sqlite import SqliteKVStoreImpl
async def test_memory_kvstore_persistence_behavior():
diff --git a/tests/unit/utils/responses/test_responses_store.py b/tests/unit/utils/responses/test_responses_store.py
index 8c108d9c1..a71fb39f6 100644
--- a/tests/unit/utils/responses/test_responses_store.py
+++ b/tests/unit/utils/responses/test_responses_store.py
@@ -11,8 +11,8 @@ from uuid import uuid4
import pytest
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order
diff --git a/tests/unit/utils/sqlstore/test_sqlstore.py b/tests/unit/utils/sqlstore/test_sqlstore.py
index d7ba0dc89..421e3b69d 100644
--- a/tests/unit/utils/sqlstore/test_sqlstore.py
+++ b/tests/unit/utils/sqlstore/test_sqlstore.py
@@ -9,9 +9,9 @@ from tempfile import TemporaryDirectory
import pytest
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
async def test_sqlite_sqlstore():
diff --git a/tests/unit/utils/test_authorized_sqlstore.py b/tests/unit/utils/test_authorized_sqlstore.py
index d85e784a9..e9a6b511b 100644
--- a/tests/unit/utils/test_authorized_sqlstore.py
+++ b/tests/unit/utils/test_authorized_sqlstore.py
@@ -10,13 +10,13 @@ from unittest.mock import patch
from llama_stack.core.access_control.access_control import default_policy, is_action_allowed
from llama_stack.core.access_control.datatypes import Action
from llama_stack.core.datatypes import User
-from llama_stack.providers.utils.sqlstore.api import ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
-from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
+from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack_api.internal.sqlstore import ColumnType
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_fetch_with_where_sql_access_control(mock_get_authenticated_user):
"""Test that fetch_all works correctly with where_sql for access control"""
with TemporaryDirectory() as tmp_dir:
@@ -78,7 +78,7 @@ async def test_authorized_fetch_with_where_sql_access_control(mock_get_authentic
assert row["title"] == "User Document"
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_sql_policy_consistency(mock_get_authenticated_user):
"""Test that SQL WHERE clause logic exactly matches is_action_allowed policy logic"""
with TemporaryDirectory() as tmp_dir:
@@ -164,7 +164,7 @@ async def test_sql_policy_consistency(mock_get_authenticated_user):
)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_store_user_attribute_capture(mock_get_authenticated_user):
"""Test that user attributes are properly captured during insert"""
with TemporaryDirectory() as tmp_dir:
diff --git a/uv.lock b/uv.lock
index a5eded2fd..93ad53e67 100644
--- a/uv.lock
+++ b/uv.lock
@@ -2165,10 +2165,8 @@ requires-dist = [
{ name = "python-dotenv" },
{ name = "python-multipart", specifier = ">=0.0.20" },
{ name = "pyyaml", specifier = ">=6.0" },
- { name = "pyyaml", specifier = ">=6.0.2" },
{ name = "rich" },
{ name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.41" },
- { name = "starlette" },
{ name = "starlette", specifier = ">=0.49.1" },
{ name = "termcolor" },
{ name = "tiktoken" },