From d5cd0eea14a3e061bc9a6e48bd606190ebaf907b Mon Sep 17 00:00:00 2001 From: Charlie Doern Date: Wed, 19 Nov 2025 11:44:28 -0500 Subject: [PATCH 01/14] feat!: standardize base_url for inference (#4177) # What does this PR do? Completes #3732 by removing runtime URL transformations and requiring users to provide full URLs in configuration. All providers now use 'base_url' consistently and respect the exact URL provided without appending paths like /v1 or /openai/v1 at runtime. BREAKING CHANGE: Users must update configs to include full URL paths (e.g., http://localhost:11434/v1 instead of http://localhost:11434). Closes #3732 ## Test Plan Existing tests should pass even with the URL changes, due to default URLs being altered. Add unit test to enforce URL standardization across remote inference providers (verifies all use 'base_url' field with HttpUrl | None type) Signed-off-by: Charlie Doern --- .../docs/providers/inference/remote_azure.mdx | 4 +- .../providers/inference/remote_cerebras.mdx | 4 +- .../providers/inference/remote_databricks.mdx | 4 +- .../providers/inference/remote_fireworks.mdx | 4 +- docs/docs/providers/inference/remote_groq.mdx | 4 +- .../inference/remote_llama-openai-compat.mdx | 4 +- .../providers/inference/remote_nvidia.mdx | 6 +- .../providers/inference/remote_ollama.mdx | 4 +- .../providers/inference/remote_openai.mdx | 2 +- .../inference/remote_passthrough.mdx | 4 +- .../providers/inference/remote_runpod.mdx | 4 +- .../providers/inference/remote_sambanova.mdx | 4 +- docs/docs/providers/inference/remote_tgi.mdx | 4 +- .../providers/inference/remote_together.mdx | 4 +- docs/docs/providers/inference/remote_vllm.mdx | 4 +- .../providers/inference/remote_watsonx.mdx | 4 +- scripts/docker.sh | 4 +- scripts/install.sh | 2 +- .../ci-tests/run-with-postgres-store.yaml | 21 ++++--- .../distributions/ci-tests/run.yaml | 21 ++++--- .../distributions/nvidia/run-with-safety.yaml | 3 +- src/llama_stack/distributions/nvidia/run.yaml | 3 +- 
.../distributions/open-benchmark/run.yaml | 4 +- .../distributions/postgres-demo/run.yaml | 2 +- .../starter-gpu/run-with-postgres-store.yaml | 21 ++++--- .../distributions/starter-gpu/run.yaml | 21 ++++--- .../starter/run-with-postgres-store.yaml | 21 ++++--- .../distributions/starter/run.yaml | 21 ++++--- .../distributions/watsonx/run.yaml | 2 +- .../providers/remote/inference/azure/azure.py | 4 +- .../remote/inference/azure/config.py | 9 +-- .../remote/inference/cerebras/cerebras.py | 4 +- .../remote/inference/cerebras/config.py | 8 +-- .../remote/inference/databricks/config.py | 10 ++-- .../remote/inference/databricks/databricks.py | 10 +++- .../remote/inference/fireworks/config.py | 8 +-- .../remote/inference/fireworks/fireworks.py | 2 +- .../providers/remote/inference/groq/config.py | 8 +-- .../providers/remote/inference/groq/groq.py | 2 +- .../inference/llama_openai_compat/config.py | 8 +-- .../inference/llama_openai_compat/llama.py | 2 +- .../remote/inference/nvidia/config.py | 16 ++---- .../remote/inference/nvidia/nvidia.py | 4 +- .../remote/inference/nvidia/utils.py | 2 +- .../remote/inference/ollama/config.py | 12 ++-- .../remote/inference/ollama/ollama.py | 12 +++- .../remote/inference/openai/config.py | 6 +- .../remote/inference/openai/openai.py | 2 +- .../remote/inference/passthrough/config.py | 8 +-- .../inference/passthrough/passthrough.py | 4 +- .../remote/inference/runpod/config.py | 6 +- .../remote/inference/runpod/runpod.py | 2 +- .../remote/inference/sambanova/config.py | 8 +-- .../remote/inference/sambanova/sambanova.py | 2 +- .../providers/remote/inference/tgi/config.py | 11 ++-- .../providers/remote/inference/tgi/tgi.py | 20 ++++--- .../remote/inference/together/config.py | 8 +-- .../remote/inference/together/together.py | 3 +- .../providers/remote/inference/vllm/config.py | 8 +-- .../providers/remote/inference/vllm/vllm.py | 6 +- .../remote/inference/watsonx/config.py | 6 +- .../remote/inference/watsonx/watsonx.py | 4 +- 
tests/integration/suites.py | 6 +- .../test_inference_client_caching.py | 4 +- .../providers/inference/test_remote_vllm.py | 6 +- .../providers/nvidia/test_rerank_inference.py | 2 +- tests/unit/providers/test_configs.py | 56 ++++++++++++++++++- 67 files changed, 282 insertions(+), 227 deletions(-) diff --git a/docs/docs/providers/inference/remote_azure.mdx b/docs/docs/providers/inference/remote_azure.mdx index fd22b157e..0382b42d7 100644 --- a/docs/docs/providers/inference/remote_azure.mdx +++ b/docs/docs/providers/inference/remote_azure.mdx @@ -24,7 +24,7 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `api_base` | `HttpUrl` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com) | +| `base_url` | `HttpUrl \| None` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com/openai/v1) | | `api_version` | `str \| None` | No | | Azure API version for Azure (e.g., 2024-12-01-preview) | | `api_type` | `str \| None` | No | azure | Azure API type for Azure (e.g., azure) | @@ -32,7 +32,7 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview ```yaml api_key: ${env.AZURE_API_KEY:=} -api_base: ${env.AZURE_API_BASE:=} +base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} ``` diff --git a/docs/docs/providers/inference/remote_cerebras.mdx b/docs/docs/providers/inference/remote_cerebras.mdx index 1fb9530bb..9fd390a29 100644 --- a/docs/docs/providers/inference/remote_cerebras.mdx +++ b/docs/docs/providers/inference/remote_cerebras.mdx @@ -17,11 +17,11 @@ Cerebras inference provider for running 
models on Cerebras Cloud platform. | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `base_url` | `str` | No | https://api.cerebras.ai | Base URL for the Cerebras API | +| `base_url` | `HttpUrl \| None` | No | https://api.cerebras.ai/v1 | Base URL for the Cerebras API | ## Sample Configuration ```yaml -base_url: https://api.cerebras.ai +base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} ``` diff --git a/docs/docs/providers/inference/remote_databricks.mdx b/docs/docs/providers/inference/remote_databricks.mdx index 7a926baf4..d50c52958 100644 --- a/docs/docs/providers/inference/remote_databricks.mdx +++ b/docs/docs/providers/inference/remote_databricks.mdx @@ -17,11 +17,11 @@ Databricks inference provider for running models on Databricks' unified analytic | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. 
| | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_token` | `SecretStr \| None` | No | | The Databricks API token | -| `url` | `str \| None` | No | | The URL for the Databricks model serving endpoint | +| `base_url` | `HttpUrl \| None` | No | | The URL for the Databricks model serving endpoint (should include /serving-endpoints path) | ## Sample Configuration ```yaml -url: ${env.DATABRICKS_HOST:=} +base_url: ${env.DATABRICKS_HOST:=} api_token: ${env.DATABRICKS_TOKEN:=} ``` diff --git a/docs/docs/providers/inference/remote_fireworks.mdx b/docs/docs/providers/inference/remote_fireworks.mdx index 7db74efc4..a67403a9b 100644 --- a/docs/docs/providers/inference/remote_fireworks.mdx +++ b/docs/docs/providers/inference/remote_fireworks.mdx @@ -17,11 +17,11 @@ Fireworks AI inference provider for Llama models and other AI models on the Fire | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server | +| `base_url` | `HttpUrl \| None` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server | ## Sample Configuration ```yaml -url: https://api.fireworks.ai/inference/v1 +base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} ``` diff --git a/docs/docs/providers/inference/remote_groq.mdx b/docs/docs/providers/inference/remote_groq.mdx index 3ebd6f907..17acd3140 100644 --- a/docs/docs/providers/inference/remote_groq.mdx +++ b/docs/docs/providers/inference/remote_groq.mdx @@ -17,11 +17,11 @@ Groq inference provider for ultra-fast inference using Groq's LPU technology. 
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | https://api.groq.com | The URL for the Groq AI server | +| `base_url` | `HttpUrl \| None` | No | https://api.groq.com/openai/v1 | The URL for the Groq AI server | ## Sample Configuration ```yaml -url: https://api.groq.com +base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} ``` diff --git a/docs/docs/providers/inference/remote_llama-openai-compat.mdx b/docs/docs/providers/inference/remote_llama-openai-compat.mdx index f67f40909..69e90b2ac 100644 --- a/docs/docs/providers/inference/remote_llama-openai-compat.mdx +++ b/docs/docs/providers/inference/remote_llama-openai-compat.mdx @@ -17,11 +17,11 @@ Llama OpenAI-compatible provider for using Llama models with OpenAI API format. | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. 
| | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `openai_compat_api_base` | `str` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server | +| `base_url` | `HttpUrl \| None` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server | ## Sample Configuration ```yaml -openai_compat_api_base: https://api.llama.com/compat/v1/ +base_url: https://api.llama.com/compat/v1/ api_key: ${env.LLAMA_API_KEY} ``` diff --git a/docs/docs/providers/inference/remote_nvidia.mdx b/docs/docs/providers/inference/remote_nvidia.mdx index 6646d8b00..a890bc57f 100644 --- a/docs/docs/providers/inference/remote_nvidia.mdx +++ b/docs/docs/providers/inference/remote_nvidia.mdx @@ -17,15 +17,13 @@ NVIDIA inference provider for accessing NVIDIA NIM models and AI services. | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | https://integrate.api.nvidia.com | A base url for accessing the NVIDIA NIM | +| `base_url` | `HttpUrl \| None` | No | https://integrate.api.nvidia.com/v1 | A base url for accessing the NVIDIA NIM | | `timeout` | `int` | No | 60 | Timeout for the HTTP requests | -| `append_api_version` | `bool` | No | True | When set to false, the API version will not be appended to the base_url. By default, it is true. 
| | `rerank_model_to_url` | `dict[str, str]` | No | `{'nv-rerank-qa-mistral-4b:1': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking', 'nvidia/nv-rerankqa-mistral-4b-v3': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking', 'nvidia/llama-3.2-nv-rerankqa-1b-v2': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v2/reranking'}` | Mapping of rerank model identifiers to their API endpoints. | ## Sample Configuration ```yaml -url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} +base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} -append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} ``` diff --git a/docs/docs/providers/inference/remote_ollama.mdx b/docs/docs/providers/inference/remote_ollama.mdx index 497bfed52..f9be84add 100644 --- a/docs/docs/providers/inference/remote_ollama.mdx +++ b/docs/docs/providers/inference/remote_ollama.mdx @@ -16,10 +16,10 @@ Ollama inference provider for running local models through the Ollama runtime. |-------|------|----------|---------|-------------| | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | -| `url` | `str` | No | http://localhost:11434 | | +| `base_url` | `HttpUrl \| None` | No | http://localhost:11434/v1 | | ## Sample Configuration ```yaml -url: ${env.OLLAMA_URL:=http://localhost:11434} +base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} ``` diff --git a/docs/docs/providers/inference/remote_openai.mdx b/docs/docs/providers/inference/remote_openai.mdx index 4931118fd..3ac3a21ad 100644 --- a/docs/docs/providers/inference/remote_openai.mdx +++ b/docs/docs/providers/inference/remote_openai.mdx @@ -17,7 +17,7 @@ OpenAI inference provider for accessing GPT models and other OpenAI services. 
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `base_url` | `str` | No | https://api.openai.com/v1 | Base URL for OpenAI API | +| `base_url` | `HttpUrl \| None` | No | https://api.openai.com/v1 | Base URL for OpenAI API | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_passthrough.mdx b/docs/docs/providers/inference/remote_passthrough.mdx index 009961d49..325ecc352 100644 --- a/docs/docs/providers/inference/remote_passthrough.mdx +++ b/docs/docs/providers/inference/remote_passthrough.mdx @@ -17,11 +17,11 @@ Passthrough inference provider for connecting to any external inference service | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | | The URL for the passthrough endpoint | +| `base_url` | `HttpUrl \| None` | No | | The URL for the passthrough endpoint | ## Sample Configuration ```yaml -url: ${env.PASSTHROUGH_URL} +base_url: ${env.PASSTHROUGH_URL} api_key: ${env.PASSTHROUGH_API_KEY} ``` diff --git a/docs/docs/providers/inference/remote_runpod.mdx b/docs/docs/providers/inference/remote_runpod.mdx index 3b67e157d..6cdcdd3b5 100644 --- a/docs/docs/providers/inference/remote_runpod.mdx +++ b/docs/docs/providers/inference/remote_runpod.mdx @@ -17,11 +17,11 @@ RunPod inference provider for running models on RunPod's cloud GPU platform. 
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_token` | `SecretStr \| None` | No | | The API token | -| `url` | `str \| None` | No | | The URL for the Runpod model serving endpoint | +| `base_url` | `HttpUrl \| None` | No | | The URL for the Runpod model serving endpoint | ## Sample Configuration ```yaml -url: ${env.RUNPOD_URL:=} +base_url: ${env.RUNPOD_URL:=} api_token: ${env.RUNPOD_API_TOKEN} ``` diff --git a/docs/docs/providers/inference/remote_sambanova.mdx b/docs/docs/providers/inference/remote_sambanova.mdx index 6f4c5d7f6..bbefdb0f0 100644 --- a/docs/docs/providers/inference/remote_sambanova.mdx +++ b/docs/docs/providers/inference/remote_sambanova.mdx @@ -17,11 +17,11 @@ SambaNova inference provider for running models on SambaNova's dataflow architec | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. 
| | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server | +| `base_url` | `HttpUrl \| None` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server | ## Sample Configuration ```yaml -url: https://api.sambanova.ai/v1 +base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} ``` diff --git a/docs/docs/providers/inference/remote_tgi.mdx b/docs/docs/providers/inference/remote_tgi.mdx index cd5ea7661..3790acdd4 100644 --- a/docs/docs/providers/inference/remote_tgi.mdx +++ b/docs/docs/providers/inference/remote_tgi.mdx @@ -16,10 +16,10 @@ Text Generation Inference (TGI) provider for HuggingFace model serving. |-------|------|----------|---------|-------------| | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | -| `url` | `str` | No | | The URL for the TGI serving endpoint | +| `base_url` | `HttpUrl \| None` | No | | The URL for the TGI serving endpoint (should include /v1 path) | ## Sample Configuration ```yaml -url: ${env.TGI_URL:=} +base_url: ${env.TGI_URL:=} ``` diff --git a/docs/docs/providers/inference/remote_together.mdx b/docs/docs/providers/inference/remote_together.mdx index 43192cc9e..dc025b5ac 100644 --- a/docs/docs/providers/inference/remote_together.mdx +++ b/docs/docs/providers/inference/remote_together.mdx @@ -17,11 +17,11 @@ Together AI inference provider for open-source models and collaborative AI devel | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. 
| | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | https://api.together.xyz/v1 | The URL for the Together AI server | +| `base_url` | `HttpUrl \| None` | No | https://api.together.xyz/v1 | The URL for the Together AI server | ## Sample Configuration ```yaml -url: https://api.together.xyz/v1 +base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} ``` diff --git a/docs/docs/providers/inference/remote_vllm.mdx b/docs/docs/providers/inference/remote_vllm.mdx index 81620dbca..a52c24adb 100644 --- a/docs/docs/providers/inference/remote_vllm.mdx +++ b/docs/docs/providers/inference/remote_vllm.mdx @@ -17,14 +17,14 @@ Remote vLLM inference provider for connecting to vLLM servers. | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_token` | `SecretStr \| None` | No | | The API token | -| `url` | `str \| None` | No | | The URL for the vLLM model serving endpoint | +| `base_url` | `HttpUrl \| None` | No | | The URL for the vLLM model serving endpoint | | `max_tokens` | `int` | No | 4096 | Maximum number of tokens to generate. | | `tls_verify` | `bool \| str` | No | True | Whether to verify TLS certificates. Can be a boolean or a path to a CA certificate file. 
| ## Sample Configuration ```yaml -url: ${env.VLLM_URL:=} +base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} diff --git a/docs/docs/providers/inference/remote_watsonx.mdx b/docs/docs/providers/inference/remote_watsonx.mdx index 3a1dba3b4..47d543e3a 100644 --- a/docs/docs/providers/inference/remote_watsonx.mdx +++ b/docs/docs/providers/inference/remote_watsonx.mdx @@ -17,14 +17,14 @@ IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider | | `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider | -| `url` | `str` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai | +| `base_url` | `HttpUrl \| None` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai | | `project_id` | `str \| None` | No | | The watsonx.ai project ID | | `timeout` | `int` | No | 60 | Timeout for the HTTP requests | ## Sample Configuration ```yaml -url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com} +base_url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com} api_key: ${env.WATSONX_API_KEY:=} project_id: ${env.WATSONX_PROJECT_ID:=} ``` diff --git a/scripts/docker.sh b/scripts/docker.sh index b56df8c03..3b2db5ca7 100755 --- a/scripts/docker.sh +++ b/scripts/docker.sh @@ -287,9 +287,9 @@ start_container() { # On macOS/Windows, use host.docker.internal to reach host from container # On Linux with --network host, use localhost if [[ "$(uname)" == "Darwin" ]] || [[ "$(uname)" == *"MINGW"* ]]; then - OLLAMA_URL="${OLLAMA_URL:-http://host.docker.internal:11434}" + OLLAMA_URL="${OLLAMA_URL:-http://host.docker.internal:11434/v1}" 
else - OLLAMA_URL="${OLLAMA_URL:-http://localhost:11434}" + OLLAMA_URL="${OLLAMA_URL:-http://localhost:11434/v1}" fi DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OLLAMA_URL=$OLLAMA_URL" diff --git a/scripts/install.sh b/scripts/install.sh index 5e4939767..7fe1d3243 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -640,7 +640,7 @@ cmd=( run -d "${PLATFORM_OPTS[@]}" --name llama-stack \ --network llama-net \ -p "${PORT}:${PORT}" \ "${server_env_opts[@]}" \ - -e OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}" \ + -e OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}/v1" \ "${SERVER_IMAGE}" --port "${PORT}") log "🦙 Starting Llama Stack..." diff --git a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml index 5384b58fe..d942c23a4 100644 --- a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml +++ b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml @@ -17,32 +17,32 @@ providers: - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} provider_type: remote::cerebras config: - base_url: https://api.cerebras.ai + base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} - provider_id: ${env.OLLAMA_URL:+ollama} provider_type: remote::ollama config: - url: ${env.OLLAMA_URL:=http://localhost:11434} + base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} - provider_id: ${env.VLLM_URL:+vllm} provider_type: remote::vllm config: - url: ${env.VLLM_URL:=} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} - provider_id: ${env.TGI_URL:+tgi} provider_type: remote::tgi config: - url: ${env.TGI_URL:=} + base_url: ${env.TGI_URL:=} - provider_id: fireworks provider_type: remote::fireworks config: - url: https://api.fireworks.ai/inference/v1 + base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} - provider_id: together 
provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} - provider_id: bedrock provider_type: remote::bedrock @@ -52,9 +52,8 @@ providers: - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: openai provider_type: remote::openai config: @@ -76,18 +75,18 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: sambanova provider_type: remote::sambanova config: - url: https://api.sambanova.ai/v1 + base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} - provider_id: ${env.AZURE_API_KEY:+azure} provider_type: remote::azure config: api_key: ${env.AZURE_API_KEY:=} - api_base: ${env.AZURE_API_BASE:=} + base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} - provider_id: sentence-transformers diff --git a/src/llama_stack/distributions/ci-tests/run.yaml b/src/llama_stack/distributions/ci-tests/run.yaml index 1118d2ad1..8b1cd2bb2 100644 --- a/src/llama_stack/distributions/ci-tests/run.yaml +++ b/src/llama_stack/distributions/ci-tests/run.yaml @@ -17,32 +17,32 @@ providers: - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} provider_type: remote::cerebras config: - base_url: https://api.cerebras.ai + base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} - provider_id: ${env.OLLAMA_URL:+ollama} provider_type: remote::ollama config: - url: ${env.OLLAMA_URL:=http://localhost:11434} + base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} - provider_id: ${env.VLLM_URL:+vllm} provider_type: remote::vllm 
config: - url: ${env.VLLM_URL:=} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} - provider_id: ${env.TGI_URL:+tgi} provider_type: remote::tgi config: - url: ${env.TGI_URL:=} + base_url: ${env.TGI_URL:=} - provider_id: fireworks provider_type: remote::fireworks config: - url: https://api.fireworks.ai/inference/v1 + base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} - provider_id: together provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} - provider_id: bedrock provider_type: remote::bedrock @@ -52,9 +52,8 @@ providers: - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: openai provider_type: remote::openai config: @@ -76,18 +75,18 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: sambanova provider_type: remote::sambanova config: - url: https://api.sambanova.ai/v1 + base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} - provider_id: ${env.AZURE_API_KEY:+azure} provider_type: remote::azure config: api_key: ${env.AZURE_API_KEY:=} - api_base: ${env.AZURE_API_BASE:=} + base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} - provider_id: sentence-transformers diff --git a/src/llama_stack/distributions/nvidia/run-with-safety.yaml b/src/llama_stack/distributions/nvidia/run-with-safety.yaml index 1d57ad17a..d2c7dd090 100644 --- 
a/src/llama_stack/distributions/nvidia/run-with-safety.yaml +++ b/src/llama_stack/distributions/nvidia/run-with-safety.yaml @@ -16,9 +16,8 @@ providers: - provider_id: nvidia provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: nvidia provider_type: remote::nvidia config: diff --git a/src/llama_stack/distributions/nvidia/run.yaml b/src/llama_stack/distributions/nvidia/run.yaml index 8c50b8bfb..c267587c7 100644 --- a/src/llama_stack/distributions/nvidia/run.yaml +++ b/src/llama_stack/distributions/nvidia/run.yaml @@ -16,9 +16,8 @@ providers: - provider_id: nvidia provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} vector_io: - provider_id: faiss provider_type: inline::faiss diff --git a/src/llama_stack/distributions/open-benchmark/run.yaml b/src/llama_stack/distributions/open-benchmark/run.yaml index 912e48dd3..7ebc58841 100644 --- a/src/llama_stack/distributions/open-benchmark/run.yaml +++ b/src/llama_stack/distributions/open-benchmark/run.yaml @@ -27,12 +27,12 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: together provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} vector_io: - provider_id: sqlite-vec diff --git a/src/llama_stack/distributions/postgres-demo/run.yaml b/src/llama_stack/distributions/postgres-demo/run.yaml index dd1c2bc7f..049f519cd 100644 --- 
a/src/llama_stack/distributions/postgres-demo/run.yaml +++ b/src/llama_stack/distributions/postgres-demo/run.yaml @@ -11,7 +11,7 @@ providers: - provider_id: vllm-inference provider_type: remote::vllm config: - url: ${env.VLLM_URL:=http://localhost:8000/v1} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} diff --git a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml index e29ada6f4..75cc9d188 100644 --- a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +++ b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml @@ -17,32 +17,32 @@ providers: - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} provider_type: remote::cerebras config: - base_url: https://api.cerebras.ai + base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} - provider_id: ${env.OLLAMA_URL:+ollama} provider_type: remote::ollama config: - url: ${env.OLLAMA_URL:=http://localhost:11434} + base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} - provider_id: ${env.VLLM_URL:+vllm} provider_type: remote::vllm config: - url: ${env.VLLM_URL:=} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} - provider_id: ${env.TGI_URL:+tgi} provider_type: remote::tgi config: - url: ${env.TGI_URL:=} + base_url: ${env.TGI_URL:=} - provider_id: fireworks provider_type: remote::fireworks config: - url: https://api.fireworks.ai/inference/v1 + base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} - provider_id: together provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} - provider_id: bedrock provider_type: remote::bedrock @@ -52,9 +52,8 @@ providers: - 
provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: openai provider_type: remote::openai config: @@ -76,18 +75,18 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: sambanova provider_type: remote::sambanova config: - url: https://api.sambanova.ai/v1 + base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} - provider_id: ${env.AZURE_API_KEY:+azure} provider_type: remote::azure config: api_key: ${env.AZURE_API_KEY:=} - api_base: ${env.AZURE_API_BASE:=} + base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} - provider_id: sentence-transformers diff --git a/src/llama_stack/distributions/starter-gpu/run.yaml b/src/llama_stack/distributions/starter-gpu/run.yaml index 7149b8659..09c7be5a1 100644 --- a/src/llama_stack/distributions/starter-gpu/run.yaml +++ b/src/llama_stack/distributions/starter-gpu/run.yaml @@ -17,32 +17,32 @@ providers: - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} provider_type: remote::cerebras config: - base_url: https://api.cerebras.ai + base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} - provider_id: ${env.OLLAMA_URL:+ollama} provider_type: remote::ollama config: - url: ${env.OLLAMA_URL:=http://localhost:11434} + base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} - provider_id: ${env.VLLM_URL:+vllm} provider_type: remote::vllm config: - url: ${env.VLLM_URL:=} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} - provider_id: ${env.TGI_URL:+tgi} 
provider_type: remote::tgi config: - url: ${env.TGI_URL:=} + base_url: ${env.TGI_URL:=} - provider_id: fireworks provider_type: remote::fireworks config: - url: https://api.fireworks.ai/inference/v1 + base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} - provider_id: together provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} - provider_id: bedrock provider_type: remote::bedrock @@ -52,9 +52,8 @@ providers: - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: openai provider_type: remote::openai config: @@ -76,18 +75,18 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: sambanova provider_type: remote::sambanova config: - url: https://api.sambanova.ai/v1 + base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} - provider_id: ${env.AZURE_API_KEY:+azure} provider_type: remote::azure config: api_key: ${env.AZURE_API_KEY:=} - api_base: ${env.AZURE_API_BASE:=} + base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} - provider_id: sentence-transformers diff --git a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml index 437674bf9..f59c809d2 100644 --- a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml +++ b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml @@ -17,32 +17,32 @@ providers: - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} provider_type: 
remote::cerebras config: - base_url: https://api.cerebras.ai + base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} - provider_id: ${env.OLLAMA_URL:+ollama} provider_type: remote::ollama config: - url: ${env.OLLAMA_URL:=http://localhost:11434} + base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} - provider_id: ${env.VLLM_URL:+vllm} provider_type: remote::vllm config: - url: ${env.VLLM_URL:=} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} - provider_id: ${env.TGI_URL:+tgi} provider_type: remote::tgi config: - url: ${env.TGI_URL:=} + base_url: ${env.TGI_URL:=} - provider_id: fireworks provider_type: remote::fireworks config: - url: https://api.fireworks.ai/inference/v1 + base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} - provider_id: together provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} - provider_id: bedrock provider_type: remote::bedrock @@ -52,9 +52,8 @@ providers: - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: openai provider_type: remote::openai config: @@ -76,18 +75,18 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: sambanova provider_type: remote::sambanova config: - url: https://api.sambanova.ai/v1 + base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} - provider_id: ${env.AZURE_API_KEY:+azure} provider_type: remote::azure config: api_key: ${env.AZURE_API_KEY:=} - 
api_base: ${env.AZURE_API_BASE:=} + base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} - provider_id: sentence-transformers diff --git a/src/llama_stack/distributions/starter/run.yaml b/src/llama_stack/distributions/starter/run.yaml index 0ce392810..435bb22a7 100644 --- a/src/llama_stack/distributions/starter/run.yaml +++ b/src/llama_stack/distributions/starter/run.yaml @@ -17,32 +17,32 @@ providers: - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} provider_type: remote::cerebras config: - base_url: https://api.cerebras.ai + base_url: https://api.cerebras.ai/v1 api_key: ${env.CEREBRAS_API_KEY:=} - provider_id: ${env.OLLAMA_URL:+ollama} provider_type: remote::ollama config: - url: ${env.OLLAMA_URL:=http://localhost:11434} + base_url: ${env.OLLAMA_URL:=http://localhost:11434/v1} - provider_id: ${env.VLLM_URL:+vllm} provider_type: remote::vllm config: - url: ${env.VLLM_URL:=} + base_url: ${env.VLLM_URL:=} max_tokens: ${env.VLLM_MAX_TOKENS:=4096} api_token: ${env.VLLM_API_TOKEN:=fake} tls_verify: ${env.VLLM_TLS_VERIFY:=true} - provider_id: ${env.TGI_URL:+tgi} provider_type: remote::tgi config: - url: ${env.TGI_URL:=} + base_url: ${env.TGI_URL:=} - provider_id: fireworks provider_type: remote::fireworks config: - url: https://api.fireworks.ai/inference/v1 + base_url: https://api.fireworks.ai/inference/v1 api_key: ${env.FIREWORKS_API_KEY:=} - provider_id: together provider_type: remote::together config: - url: https://api.together.xyz/v1 + base_url: https://api.together.xyz/v1 api_key: ${env.TOGETHER_API_KEY:=} - provider_id: bedrock provider_type: remote::bedrock @@ -52,9 +52,8 @@ providers: - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} + base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1} api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - provider_id: 
openai provider_type: remote::openai config: @@ -76,18 +75,18 @@ providers: - provider_id: groq provider_type: remote::groq config: - url: https://api.groq.com + base_url: https://api.groq.com/openai/v1 api_key: ${env.GROQ_API_KEY:=} - provider_id: sambanova provider_type: remote::sambanova config: - url: https://api.sambanova.ai/v1 + base_url: https://api.sambanova.ai/v1 api_key: ${env.SAMBANOVA_API_KEY:=} - provider_id: ${env.AZURE_API_KEY:+azure} provider_type: remote::azure config: api_key: ${env.AZURE_API_KEY:=} - api_base: ${env.AZURE_API_BASE:=} + base_url: ${env.AZURE_API_BASE:=} api_version: ${env.AZURE_API_VERSION:=} api_type: ${env.AZURE_API_TYPE:=} - provider_id: sentence-transformers diff --git a/src/llama_stack/distributions/watsonx/run.yaml b/src/llama_stack/distributions/watsonx/run.yaml index 8456115d2..f8c489fe3 100644 --- a/src/llama_stack/distributions/watsonx/run.yaml +++ b/src/llama_stack/distributions/watsonx/run.yaml @@ -15,7 +15,7 @@ providers: - provider_id: watsonx provider_type: remote::watsonx config: - url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com} + base_url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com} api_key: ${env.WATSONX_API_KEY:=} project_id: ${env.WATSONX_PROJECT_ID:=} vector_io: diff --git a/src/llama_stack/providers/remote/inference/azure/azure.py b/src/llama_stack/providers/remote/inference/azure/azure.py index 134d01b15..c977d75d5 100644 --- a/src/llama_stack/providers/remote/inference/azure/azure.py +++ b/src/llama_stack/providers/remote/inference/azure/azure.py @@ -4,8 +4,6 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from urllib.parse import urljoin - from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from .config import AzureConfig @@ -22,4 +20,4 @@ class AzureInferenceAdapter(OpenAIMixin): Returns the Azure API base URL from the configuration. 
""" - return urljoin(str(self.config.api_base), "/openai/v1") + return str(self.config.base_url) diff --git a/src/llama_stack/providers/remote/inference/azure/config.py b/src/llama_stack/providers/remote/inference/azure/config.py index b801b91b2..f6407a183 100644 --- a/src/llama_stack/providers/remote/inference/azure/config.py +++ b/src/llama_stack/providers/remote/inference/azure/config.py @@ -32,8 +32,9 @@ class AzureProviderDataValidator(BaseModel): @json_schema_type class AzureConfig(RemoteInferenceProviderConfig): - api_base: HttpUrl = Field( - description="Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com)", + base_url: HttpUrl | None = Field( + default=None, + description="Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com/openai/v1)", ) api_version: str | None = Field( default_factory=lambda: os.getenv("AZURE_API_VERSION"), @@ -48,14 +49,14 @@ class AzureConfig(RemoteInferenceProviderConfig): def sample_run_config( cls, api_key: str = "${env.AZURE_API_KEY:=}", - api_base: str = "${env.AZURE_API_BASE:=}", + base_url: str = "${env.AZURE_API_BASE:=}", api_version: str = "${env.AZURE_API_VERSION:=}", api_type: str = "${env.AZURE_API_TYPE:=}", **kwargs, ) -> dict[str, Any]: return { "api_key": api_key, - "api_base": api_base, + "base_url": base_url, "api_version": api_version, "api_type": api_type, } diff --git a/src/llama_stack/providers/remote/inference/cerebras/cerebras.py b/src/llama_stack/providers/remote/inference/cerebras/cerebras.py index 680431e22..23c27df1e 100644 --- a/src/llama_stack/providers/remote/inference/cerebras/cerebras.py +++ b/src/llama_stack/providers/remote/inference/cerebras/cerebras.py @@ -4,8 +4,6 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
-from urllib.parse import urljoin - from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack_api import ( OpenAIEmbeddingsRequestWithExtraBody, @@ -21,7 +19,7 @@ class CerebrasInferenceAdapter(OpenAIMixin): provider_data_api_key_field: str = "cerebras_api_key" def get_base_url(self) -> str: - return urljoin(self.config.base_url, "v1") + return str(self.config.base_url) async def openai_embeddings( self, diff --git a/src/llama_stack/providers/remote/inference/cerebras/config.py b/src/llama_stack/providers/remote/inference/cerebras/config.py index db357fd1c..ea88abbea 100644 --- a/src/llama_stack/providers/remote/inference/cerebras/config.py +++ b/src/llama_stack/providers/remote/inference/cerebras/config.py @@ -7,12 +7,12 @@ import os from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type -DEFAULT_BASE_URL = "https://api.cerebras.ai" +DEFAULT_BASE_URL = "https://api.cerebras.ai/v1" class CerebrasProviderDataValidator(BaseModel): @@ -24,8 +24,8 @@ class CerebrasProviderDataValidator(BaseModel): @json_schema_type class CerebrasImplConfig(RemoteInferenceProviderConfig): - base_url: str = Field( - default=os.environ.get("CEREBRAS_BASE_URL", DEFAULT_BASE_URL), + base_url: HttpUrl | None = Field( + default=HttpUrl(os.environ.get("CEREBRAS_BASE_URL", DEFAULT_BASE_URL)), description="Base URL for the Cerebras API", ) diff --git a/src/llama_stack/providers/remote/inference/databricks/config.py b/src/llama_stack/providers/remote/inference/databricks/config.py index bd409fa13..44cb862f9 100644 --- a/src/llama_stack/providers/remote/inference/databricks/config.py +++ b/src/llama_stack/providers/remote/inference/databricks/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import BaseModel, Field, SecretStr +from pydantic import BaseModel, 
Field, HttpUrl, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -21,9 +21,9 @@ class DatabricksProviderDataValidator(BaseModel): @json_schema_type class DatabricksImplConfig(RemoteInferenceProviderConfig): - url: str | None = Field( + base_url: HttpUrl | None = Field( default=None, - description="The URL for the Databricks model serving endpoint", + description="The URL for the Databricks model serving endpoint (should include /serving-endpoints path)", ) auth_credential: SecretStr | None = Field( default=None, @@ -34,11 +34,11 @@ class DatabricksImplConfig(RemoteInferenceProviderConfig): @classmethod def sample_run_config( cls, - url: str = "${env.DATABRICKS_HOST:=}", + base_url: str = "${env.DATABRICKS_HOST:=}", api_token: str = "${env.DATABRICKS_TOKEN:=}", **kwargs: Any, ) -> dict[str, Any]: return { - "url": url, + "base_url": base_url, "api_token": api_token, } diff --git a/src/llama_stack/providers/remote/inference/databricks/databricks.py b/src/llama_stack/providers/remote/inference/databricks/databricks.py index c07d97b67..f2f8832f6 100644 --- a/src/llama_stack/providers/remote/inference/databricks/databricks.py +++ b/src/llama_stack/providers/remote/inference/databricks/databricks.py @@ -29,15 +29,21 @@ class DatabricksInferenceAdapter(OpenAIMixin): } def get_base_url(self) -> str: - return f"{self.config.url}/serving-endpoints" + return str(self.config.base_url) async def list_provider_model_ids(self) -> Iterable[str]: # Filter out None values from endpoint names api_token = self._get_api_key_from_config_or_provider_data() + # WorkspaceClient expects base host without /serving-endpoints suffix + base_url_str = str(self.config.base_url) + if base_url_str.endswith("/serving-endpoints"): + host = base_url_str[:-18] # Remove '/serving-endpoints' + else: + host = base_url_str return [ endpoint.name # type: ignore[misc] for endpoint in WorkspaceClient( - 
host=self.config.url, token=api_token + host=host, token=api_token ).serving_endpoints.list() # TODO: this is not async ] diff --git a/src/llama_stack/providers/remote/inference/fireworks/config.py b/src/llama_stack/providers/remote/inference/fireworks/config.py index e36c76054..c59b5f270 100644 --- a/src/llama_stack/providers/remote/inference/fireworks/config.py +++ b/src/llama_stack/providers/remote/inference/fireworks/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import Field +from pydantic import Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -14,14 +14,14 @@ from llama_stack_api import json_schema_type @json_schema_type class FireworksImplConfig(RemoteInferenceProviderConfig): - url: str = Field( - default="https://api.fireworks.ai/inference/v1", + base_url: HttpUrl | None = Field( + default=HttpUrl("https://api.fireworks.ai/inference/v1"), description="The URL for the Fireworks server", ) @classmethod def sample_run_config(cls, api_key: str = "${env.FIREWORKS_API_KEY:=}", **kwargs) -> dict[str, Any]: return { - "url": "https://api.fireworks.ai/inference/v1", + "base_url": "https://api.fireworks.ai/inference/v1", "api_key": api_key, } diff --git a/src/llama_stack/providers/remote/inference/fireworks/fireworks.py b/src/llama_stack/providers/remote/inference/fireworks/fireworks.py index 7e2b73546..61ea0b1f6 100644 --- a/src/llama_stack/providers/remote/inference/fireworks/fireworks.py +++ b/src/llama_stack/providers/remote/inference/fireworks/fireworks.py @@ -24,4 +24,4 @@ class FireworksInferenceAdapter(OpenAIMixin): provider_data_api_key_field: str = "fireworks_api_key" def get_base_url(self) -> str: - return "https://api.fireworks.ai/inference/v1" + return str(self.config.base_url) diff --git a/src/llama_stack/providers/remote/inference/groq/config.py b/src/llama_stack/providers/remote/inference/groq/config.py index 
cca53a4e8..e5c29c271 100644 --- a/src/llama_stack/providers/remote/inference/groq/config.py +++ b/src/llama_stack/providers/remote/inference/groq/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -21,14 +21,14 @@ class GroqProviderDataValidator(BaseModel): @json_schema_type class GroqConfig(RemoteInferenceProviderConfig): - url: str = Field( - default="https://api.groq.com", + base_url: HttpUrl | None = Field( + default=HttpUrl("https://api.groq.com/openai/v1"), description="The URL for the Groq AI server", ) @classmethod def sample_run_config(cls, api_key: str = "${env.GROQ_API_KEY:=}", **kwargs) -> dict[str, Any]: return { - "url": "https://api.groq.com", + "base_url": "https://api.groq.com/openai/v1", "api_key": api_key, } diff --git a/src/llama_stack/providers/remote/inference/groq/groq.py b/src/llama_stack/providers/remote/inference/groq/groq.py index 3a4f2626d..f99de91ca 100644 --- a/src/llama_stack/providers/remote/inference/groq/groq.py +++ b/src/llama_stack/providers/remote/inference/groq/groq.py @@ -15,4 +15,4 @@ class GroqInferenceAdapter(OpenAIMixin): provider_data_api_key_field: str = "groq_api_key" def get_base_url(self) -> str: - return f"{self.config.url}/openai/v1" + return str(self.config.base_url) diff --git a/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py b/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py index ded210d89..a0f80d969 100644 --- a/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py +++ b/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from 
llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -21,14 +21,14 @@ class LlamaProviderDataValidator(BaseModel): @json_schema_type class LlamaCompatConfig(RemoteInferenceProviderConfig): - openai_compat_api_base: str = Field( - default="https://api.llama.com/compat/v1/", + base_url: HttpUrl | None = Field( + default=HttpUrl("https://api.llama.com/compat/v1/"), description="The URL for the Llama API server", ) @classmethod def sample_run_config(cls, api_key: str = "${env.LLAMA_API_KEY}", **kwargs) -> dict[str, Any]: return { - "openai_compat_api_base": "https://api.llama.com/compat/v1/", + "base_url": "https://api.llama.com/compat/v1/", "api_key": api_key, } diff --git a/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py b/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py index a5f67ecd1..f29aebf36 100644 --- a/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py +++ b/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py @@ -31,7 +31,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin): :return: The Llama API base URL """ - return self.config.openai_compat_api_base + return str(self.config.base_url) async def openai_completion( self, diff --git a/src/llama_stack/providers/remote/inference/nvidia/config.py b/src/llama_stack/providers/remote/inference/nvidia/config.py index e5b0c6b73..e1e9a0ea9 100644 --- a/src/llama_stack/providers/remote/inference/nvidia/config.py +++ b/src/llama_stack/providers/remote/inference/nvidia/config.py @@ -7,7 +7,7 @@ import os from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -44,18 +44,14 @@ class NVIDIAConfig(RemoteInferenceProviderConfig): URL of your running NVIDIA NIM 
and do not need to set the api_key. """ - url: str = Field( - default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"), + base_url: HttpUrl | None = Field( + default_factory=lambda: HttpUrl(os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com/v1")), description="A base url for accessing the NVIDIA NIM", ) timeout: int = Field( default=60, description="Timeout for the HTTP requests", ) - append_api_version: bool = Field( - default_factory=lambda: os.getenv("NVIDIA_APPEND_API_VERSION", "True").lower() != "false", - description="When set to false, the API version will not be appended to the base_url. By default, it is true.", - ) rerank_model_to_url: dict[str, str] = Field( default_factory=lambda: { "nv-rerank-qa-mistral-4b:1": "https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking", @@ -68,13 +64,11 @@ class NVIDIAConfig(RemoteInferenceProviderConfig): @classmethod def sample_run_config( cls, - url: str = "${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}", + base_url: str = "${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}", api_key: str = "${env.NVIDIA_API_KEY:=}", - append_api_version: bool = "${env.NVIDIA_APPEND_API_VERSION:=True}", **kwargs, ) -> dict[str, Any]: return { - "url": url, + "base_url": base_url, "api_key": api_key, - "append_api_version": append_api_version, } diff --git a/src/llama_stack/providers/remote/inference/nvidia/nvidia.py b/src/llama_stack/providers/remote/inference/nvidia/nvidia.py index 17f8775bf..5d0d52d6a 100644 --- a/src/llama_stack/providers/remote/inference/nvidia/nvidia.py +++ b/src/llama_stack/providers/remote/inference/nvidia/nvidia.py @@ -44,7 +44,7 @@ class NVIDIAInferenceAdapter(OpenAIMixin): async def initialize(self) -> None: - logger.info(f"Initializing NVIDIAInferenceAdapter({self.config.url})...") + logger.info(f"Initializing NVIDIAInferenceAdapter({self.config.base_url})...") if _is_nvidia_hosted(self.config): if not self.config.auth_credential: @@ -72,7
+72,7 @@ class NVIDIAInferenceAdapter(OpenAIMixin): :return: The NVIDIA API base URL """ - return f"{self.config.url}/v1" if self.config.append_api_version else self.config.url + return str(self.config.base_url) async def list_provider_model_ids(self) -> Iterable[str]: """ diff --git a/src/llama_stack/providers/remote/inference/nvidia/utils.py b/src/llama_stack/providers/remote/inference/nvidia/utils.py index 46ee939d9..c138d1fc5 100644 --- a/src/llama_stack/providers/remote/inference/nvidia/utils.py +++ b/src/llama_stack/providers/remote/inference/nvidia/utils.py @@ -8,4 +8,4 @@ from . import NVIDIAConfig def _is_nvidia_hosted(config: NVIDIAConfig) -> bool: - return "integrate.api.nvidia.com" in config.url + return "integrate.api.nvidia.com" in str(config.base_url) diff --git a/src/llama_stack/providers/remote/inference/ollama/config.py b/src/llama_stack/providers/remote/inference/ollama/config.py index 416b847a0..60dd34fa8 100644 --- a/src/llama_stack/providers/remote/inference/ollama/config.py +++ b/src/llama_stack/providers/remote/inference/ollama/config.py @@ -6,20 +6,22 @@ from typing import Any -from pydantic import Field, SecretStr +from pydantic import Field, HttpUrl, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig -DEFAULT_OLLAMA_URL = "http://localhost:11434" +DEFAULT_OLLAMA_URL = "http://localhost:11434/v1" class OllamaImplConfig(RemoteInferenceProviderConfig): auth_credential: SecretStr | None = Field(default=None, exclude=True) - url: str = DEFAULT_OLLAMA_URL + base_url: HttpUrl | None = Field(default=HttpUrl(DEFAULT_OLLAMA_URL)) @classmethod - def sample_run_config(cls, url: str = "${env.OLLAMA_URL:=http://localhost:11434}", **kwargs) -> dict[str, Any]: + def sample_run_config( + cls, base_url: str = "${env.OLLAMA_URL:=http://localhost:11434/v1}", **kwargs + ) -> dict[str, Any]: return { - "url": url, + "base_url": base_url, } diff --git 
a/src/llama_stack/providers/remote/inference/ollama/ollama.py b/src/llama_stack/providers/remote/inference/ollama/ollama.py index d1bf85361..e8b872384 100644 --- a/src/llama_stack/providers/remote/inference/ollama/ollama.py +++ b/src/llama_stack/providers/remote/inference/ollama/ollama.py @@ -55,17 +55,23 @@ class OllamaInferenceAdapter(OpenAIMixin): # ollama client attaches itself to the current event loop (sadly?) loop = asyncio.get_running_loop() if loop not in self._clients: - self._clients[loop] = AsyncOllamaClient(host=self.config.url) + # Ollama client expects base URL without /v1 suffix + base_url_str = str(self.config.base_url) + if base_url_str.endswith("/v1"): + host = base_url_str[:-3] + else: + host = base_url_str + self._clients[loop] = AsyncOllamaClient(host=host) return self._clients[loop] def get_api_key(self): return "NO KEY REQUIRED" def get_base_url(self): - return self.config.url.rstrip("/") + "/v1" + return str(self.config.base_url) async def initialize(self) -> None: - logger.info(f"checking connectivity to Ollama at `{self.config.url}`...") + logger.info(f"checking connectivity to Ollama at `{self.config.base_url}`...") r = await self.health() if r["status"] == HealthStatus.ERROR: logger.warning( diff --git a/src/llama_stack/providers/remote/inference/openai/config.py b/src/llama_stack/providers/remote/inference/openai/config.py index ab28e571f..2057cd0d6 100644 --- a/src/llama_stack/providers/remote/inference/openai/config.py +++ b/src/llama_stack/providers/remote/inference/openai/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -21,8 +21,8 @@ class OpenAIProviderDataValidator(BaseModel): @json_schema_type class OpenAIConfig(RemoteInferenceProviderConfig): - base_url: str = Field( - 
default="https://api.openai.com/v1", + base_url: HttpUrl | None = Field( + default=HttpUrl("https://api.openai.com/v1"), description="Base URL for OpenAI API", ) diff --git a/src/llama_stack/providers/remote/inference/openai/openai.py b/src/llama_stack/providers/remote/inference/openai/openai.py index 52bc48f1a..2d465546a 100644 --- a/src/llama_stack/providers/remote/inference/openai/openai.py +++ b/src/llama_stack/providers/remote/inference/openai/openai.py @@ -35,4 +35,4 @@ class OpenAIInferenceAdapter(OpenAIMixin): Returns the OpenAI API base URL from the configuration. """ - return self.config.base_url + return str(self.config.base_url) diff --git a/src/llama_stack/providers/remote/inference/passthrough/config.py b/src/llama_stack/providers/remote/inference/passthrough/config.py index 54508b6fb..f45806e79 100644 --- a/src/llama_stack/providers/remote/inference/passthrough/config.py +++ b/src/llama_stack/providers/remote/inference/passthrough/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import Field +from pydantic import Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -14,16 +14,16 @@ from llama_stack_api import json_schema_type @json_schema_type class PassthroughImplConfig(RemoteInferenceProviderConfig): - url: str = Field( + base_url: HttpUrl | None = Field( default=None, description="The URL for the passthrough endpoint", ) @classmethod def sample_run_config( - cls, url: str = "${env.PASSTHROUGH_URL}", api_key: str = "${env.PASSTHROUGH_API_KEY}", **kwargs + cls, base_url: HttpUrl | None = "${env.PASSTHROUGH_URL}", api_key: str = "${env.PASSTHROUGH_API_KEY}", **kwargs ) -> dict[str, Any]: return { - "url": url, + "base_url": base_url, "api_key": api_key, } diff --git a/src/llama_stack/providers/remote/inference/passthrough/passthrough.py b/src/llama_stack/providers/remote/inference/passthrough/passthrough.py index 
75eedf026..b0e2e74ad 100644 --- a/src/llama_stack/providers/remote/inference/passthrough/passthrough.py +++ b/src/llama_stack/providers/remote/inference/passthrough/passthrough.py @@ -82,8 +82,8 @@ class PassthroughInferenceAdapter(NeedsRequestProviderData, Inference): def _get_passthrough_url(self) -> str: """Get the passthrough URL from config or provider data.""" - if self.config.url is not None: - return self.config.url + if self.config.base_url is not None: + return str(self.config.base_url) provider_data = self.get_request_provider_data() if provider_data is None: diff --git a/src/llama_stack/providers/remote/inference/runpod/config.py b/src/llama_stack/providers/remote/inference/runpod/config.py index 2ee56ca94..8d06f5263 100644 --- a/src/llama_stack/providers/remote/inference/runpod/config.py +++ b/src/llama_stack/providers/remote/inference/runpod/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import BaseModel, Field, SecretStr +from pydantic import BaseModel, Field, HttpUrl, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -21,7 +21,7 @@ class RunpodProviderDataValidator(BaseModel): @json_schema_type class RunpodImplConfig(RemoteInferenceProviderConfig): - url: str | None = Field( + base_url: HttpUrl | None = Field( default=None, description="The URL for the Runpod model serving endpoint", ) @@ -34,6 +34,6 @@ class RunpodImplConfig(RemoteInferenceProviderConfig): @classmethod def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]: return { - "url": "${env.RUNPOD_URL:=}", + "base_url": "${env.RUNPOD_URL:=}", "api_token": "${env.RUNPOD_API_TOKEN}", } diff --git a/src/llama_stack/providers/remote/inference/runpod/runpod.py b/src/llama_stack/providers/remote/inference/runpod/runpod.py index 9c770cc24..04ad12851 100644 --- a/src/llama_stack/providers/remote/inference/runpod/runpod.py +++ 
b/src/llama_stack/providers/remote/inference/runpod/runpod.py @@ -28,7 +28,7 @@ class RunpodInferenceAdapter(OpenAIMixin): def get_base_url(self) -> str: """Get base URL for OpenAI client.""" - return self.config.url + return str(self.config.base_url) async def openai_chat_completion( self, diff --git a/src/llama_stack/providers/remote/inference/sambanova/config.py b/src/llama_stack/providers/remote/inference/sambanova/config.py index 93679ba99..79cda75a0 100644 --- a/src/llama_stack/providers/remote/inference/sambanova/config.py +++ b/src/llama_stack/providers/remote/inference/sambanova/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -21,14 +21,14 @@ class SambaNovaProviderDataValidator(BaseModel): @json_schema_type class SambaNovaImplConfig(RemoteInferenceProviderConfig): - url: str = Field( - default="https://api.sambanova.ai/v1", + base_url: HttpUrl | None = Field( + default=HttpUrl("https://api.sambanova.ai/v1"), description="The URL for the SambaNova AI server", ) @classmethod def sample_run_config(cls, api_key: str = "${env.SAMBANOVA_API_KEY:=}", **kwargs) -> dict[str, Any]: return { - "url": "https://api.sambanova.ai/v1", + "base_url": "https://api.sambanova.ai/v1", "api_key": api_key, } diff --git a/src/llama_stack/providers/remote/inference/sambanova/sambanova.py b/src/llama_stack/providers/remote/inference/sambanova/sambanova.py index daa4b1670..cb01e3a90 100644 --- a/src/llama_stack/providers/remote/inference/sambanova/sambanova.py +++ b/src/llama_stack/providers/remote/inference/sambanova/sambanova.py @@ -25,4 +25,4 @@ class SambaNovaInferenceAdapter(OpenAIMixin): :return: The SambaNova base URL """ - return self.config.url + return str(self.config.base_url) diff --git 
a/src/llama_stack/providers/remote/inference/tgi/config.py b/src/llama_stack/providers/remote/inference/tgi/config.py index 74edc8523..44cb4b812 100644 --- a/src/llama_stack/providers/remote/inference/tgi/config.py +++ b/src/llama_stack/providers/remote/inference/tgi/config.py @@ -5,7 +5,7 @@ # the root directory of this source tree. -from pydantic import BaseModel, Field, SecretStr +from pydantic import BaseModel, Field, HttpUrl, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -15,18 +15,19 @@ from llama_stack_api import json_schema_type class TGIImplConfig(RemoteInferenceProviderConfig): auth_credential: SecretStr | None = Field(default=None, exclude=True) - url: str = Field( - description="The URL for the TGI serving endpoint", + base_url: HttpUrl | None = Field( + default=None, + description="The URL for the TGI serving endpoint (should include /v1 path)", ) @classmethod def sample_run_config( cls, - url: str = "${env.TGI_URL:=}", + base_url: str = "${env.TGI_URL:=}", **kwargs, ): return { - "url": url, + "base_url": base_url, } diff --git a/src/llama_stack/providers/remote/inference/tgi/tgi.py b/src/llama_stack/providers/remote/inference/tgi/tgi.py index dd47ccc62..5dc8c33f7 100644 --- a/src/llama_stack/providers/remote/inference/tgi/tgi.py +++ b/src/llama_stack/providers/remote/inference/tgi/tgi.py @@ -8,7 +8,7 @@ from collections.abc import Iterable from huggingface_hub import AsyncInferenceClient, HfApi -from pydantic import SecretStr +from pydantic import HttpUrl, SecretStr from llama_stack.log import get_logger from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin @@ -23,7 +23,7 @@ log = get_logger(name=__name__, category="inference::tgi") class _HfAdapter(OpenAIMixin): - url: str + base_url: HttpUrl api_key: SecretStr hf_client: AsyncInferenceClient @@ -36,7 +36,7 @@ class _HfAdapter(OpenAIMixin): return "NO KEY REQUIRED" def 
get_base_url(self): - return self.url + return self.base_url async def list_provider_model_ids(self) -> Iterable[str]: return [self.model_id] @@ -50,14 +50,20 @@ class _HfAdapter(OpenAIMixin): class TGIAdapter(_HfAdapter): async def initialize(self, config: TGIImplConfig) -> None: - if not config.url: + if not config.base_url: raise ValueError("You must provide a URL in run.yaml (or via the TGI_URL environment variable) to use TGI.") - log.info(f"Initializing TGI client with url={config.url}") - self.hf_client = AsyncInferenceClient(model=config.url, provider="hf-inference") + log.info(f"Initializing TGI client with url={config.base_url}") + # Extract base URL without /v1 for HF client initialization + base_url_str = str(config.base_url).rstrip("/") + if base_url_str.endswith("/v1"): + base_url_for_client = base_url_str[:-3] + else: + base_url_for_client = base_url_str + self.hf_client = AsyncInferenceClient(model=base_url_for_client, provider="hf-inference") endpoint_info = await self.hf_client.get_endpoint_info() self.max_tokens = endpoint_info["max_total_tokens"] self.model_id = endpoint_info["model_id"] - self.url = f"{config.url.rstrip('/')}/v1" + self.base_url = config.base_url self.api_key = SecretStr("NO_KEY") diff --git a/src/llama_stack/providers/remote/inference/together/config.py b/src/llama_stack/providers/remote/inference/together/config.py index c1b3c4a55..16f0686ba 100644 --- a/src/llama_stack/providers/remote/inference/together/config.py +++ b/src/llama_stack/providers/remote/inference/together/config.py @@ -6,7 +6,7 @@ from typing import Any -from pydantic import Field +from pydantic import Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -14,14 +14,14 @@ from llama_stack_api import json_schema_type @json_schema_type class TogetherImplConfig(RemoteInferenceProviderConfig): - url: str = Field( - default="https://api.together.xyz/v1", + 
base_url: HttpUrl | None = Field( + default=HttpUrl("https://api.together.xyz/v1"), description="The URL for the Together AI server", ) @classmethod def sample_run_config(cls, **kwargs) -> dict[str, Any]: return { - "url": "https://api.together.xyz/v1", + "base_url": "https://api.together.xyz/v1", "api_key": "${env.TOGETHER_API_KEY:=}", } diff --git a/src/llama_stack/providers/remote/inference/together/together.py b/src/llama_stack/providers/remote/inference/together/together.py index cd34aec5e..0826dbcd2 100644 --- a/src/llama_stack/providers/remote/inference/together/together.py +++ b/src/llama_stack/providers/remote/inference/together/together.py @@ -9,7 +9,6 @@ from collections.abc import Iterable from typing import Any, cast from together import AsyncTogether # type: ignore[import-untyped] -from together.constants import BASE_URL # type: ignore[import-untyped] from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack.log import get_logger @@ -42,7 +41,7 @@ class TogetherInferenceAdapter(OpenAIMixin, NeedsRequestProviderData): provider_data_api_key_field: str = "together_api_key" def get_base_url(self): - return BASE_URL + return str(self.config.base_url) def _get_client(self) -> AsyncTogether: together_api_key = None diff --git a/src/llama_stack/providers/remote/inference/vllm/config.py b/src/llama_stack/providers/remote/inference/vllm/config.py index c43533ee4..db6c74431 100644 --- a/src/llama_stack/providers/remote/inference/vllm/config.py +++ b/src/llama_stack/providers/remote/inference/vllm/config.py @@ -6,7 +6,7 @@ from pathlib import Path -from pydantic import Field, SecretStr, field_validator +from pydantic import Field, HttpUrl, SecretStr, field_validator from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -14,7 +14,7 @@ from llama_stack_api import json_schema_type @json_schema_type class 
VLLMInferenceAdapterConfig(RemoteInferenceProviderConfig): - url: str | None = Field( + base_url: HttpUrl | None = Field( default=None, description="The URL for the vLLM model serving endpoint", ) @@ -48,11 +48,11 @@ class VLLMInferenceAdapterConfig(RemoteInferenceProviderConfig): @classmethod def sample_run_config( cls, - url: str = "${env.VLLM_URL:=}", + base_url: str = "${env.VLLM_URL:=}", **kwargs, ): return { - "url": url, + "base_url": base_url, "max_tokens": "${env.VLLM_MAX_TOKENS:=4096}", "api_token": "${env.VLLM_API_TOKEN:=fake}", "tls_verify": "${env.VLLM_TLS_VERIFY:=true}", diff --git a/src/llama_stack/providers/remote/inference/vllm/vllm.py b/src/llama_stack/providers/remote/inference/vllm/vllm.py index 1510e9384..6664ca36b 100644 --- a/src/llama_stack/providers/remote/inference/vllm/vllm.py +++ b/src/llama_stack/providers/remote/inference/vllm/vllm.py @@ -39,12 +39,12 @@ class VLLMInferenceAdapter(OpenAIMixin): def get_base_url(self) -> str: """Get the base URL from config.""" - if not self.config.url: + if not self.config.base_url: raise ValueError("No base URL configured") - return self.config.url + return str(self.config.base_url) async def initialize(self) -> None: - if not self.config.url: + if not self.config.base_url: raise ValueError( "You must provide a URL in run.yaml (or via the VLLM_URL environment variable) to use vLLM." 
) diff --git a/src/llama_stack/providers/remote/inference/watsonx/config.py b/src/llama_stack/providers/remote/inference/watsonx/config.py index 914f80820..be2b2c0ab 100644 --- a/src/llama_stack/providers/remote/inference/watsonx/config.py +++ b/src/llama_stack/providers/remote/inference/watsonx/config.py @@ -7,7 +7,7 @@ import os from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, HttpUrl from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack_api import json_schema_type @@ -23,7 +23,7 @@ class WatsonXProviderDataValidator(BaseModel): @json_schema_type class WatsonXConfig(RemoteInferenceProviderConfig): - url: str = Field( + base_url: HttpUrl | None = Field( default_factory=lambda: os.getenv("WATSONX_BASE_URL", "https://us-south.ml.cloud.ibm.com"), description="A base url for accessing the watsonx.ai", ) @@ -39,7 +39,7 @@ class WatsonXConfig(RemoteInferenceProviderConfig): @classmethod def sample_run_config(cls, **kwargs) -> dict[str, Any]: return { - "url": "${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}", + "base_url": "${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}", "api_key": "${env.WATSONX_API_KEY:=}", "project_id": "${env.WATSONX_PROJECT_ID:=}", } diff --git a/src/llama_stack/providers/remote/inference/watsonx/watsonx.py b/src/llama_stack/providers/remote/inference/watsonx/watsonx.py index aab9e2dca..5684f6c17 100644 --- a/src/llama_stack/providers/remote/inference/watsonx/watsonx.py +++ b/src/llama_stack/providers/remote/inference/watsonx/watsonx.py @@ -255,7 +255,7 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin): ) def get_base_url(self) -> str: - return self.config.url + return str(self.config.base_url) # Copied from OpenAIMixin async def check_model_availability(self, model: str) -> bool: @@ -316,7 +316,7 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin): """ Retrieves foundation model specifications from the 
watsonx.ai API. """ - url = f"{self.config.url}/ml/v1/foundation_model_specs?version=2023-10-25" + url = f"{str(self.config.base_url)}/ml/v1/foundation_model_specs?version=2023-10-25" headers = { # Note that there is no authorization header. Listing models does not require authentication. "Content-Type": "application/json", diff --git a/tests/integration/suites.py b/tests/integration/suites.py index 7689657b4..10c872705 100644 --- a/tests/integration/suites.py +++ b/tests/integration/suites.py @@ -50,7 +50,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = { name="ollama", description="Local Ollama provider with text + safety models", env={ - "OLLAMA_URL": "http://0.0.0.0:11434", + "OLLAMA_URL": "http://0.0.0.0:11434/v1", "SAFETY_MODEL": "ollama/llama-guard3:1b", }, defaults={ @@ -64,7 +64,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = { name="ollama", description="Local Ollama provider with a vision model", env={ - "OLLAMA_URL": "http://0.0.0.0:11434", + "OLLAMA_URL": "http://0.0.0.0:11434/v1", }, defaults={ "vision_model": "ollama/llama3.2-vision:11b", @@ -75,7 +75,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = { name="ollama-postgres", description="Server-mode tests with Postgres-backed persistence", env={ - "OLLAMA_URL": "http://0.0.0.0:11434", + "OLLAMA_URL": "http://0.0.0.0:11434/v1", "SAFETY_MODEL": "ollama/llama-guard3:1b", "POSTGRES_HOST": "127.0.0.1", "POSTGRES_PORT": "5432", diff --git a/tests/unit/providers/inference/test_inference_client_caching.py b/tests/unit/providers/inference/test_inference_client_caching.py index aa3a2c77a..6ddf790af 100644 --- a/tests/unit/providers/inference/test_inference_client_caching.py +++ b/tests/unit/providers/inference/test_inference_client_caching.py @@ -120,7 +120,7 @@ from llama_stack.providers.remote.inference.watsonx.watsonx import WatsonXInfere VLLMInferenceAdapter, "llama_stack.providers.remote.inference.vllm.VLLMProviderDataValidator", { - "url": "http://fake", + "base_url": "http://fake", }, ), ], @@ -153,7 +153,7 @@ def 
test_litellm_provider_data_used(config_cls, adapter_cls, provider_data_valid """Validate data for LiteLLM-based providers. Similar to test_openai_provider_data_used, but without the assumption that there is an OpenAI-compatible client object.""" - inference_adapter = adapter_cls(config=config_cls()) + inference_adapter = adapter_cls(config=config_cls(base_url="http://fake")) inference_adapter.__provider_spec__ = MagicMock() inference_adapter.__provider_spec__.provider_data_validator = provider_data_validator diff --git a/tests/unit/providers/inference/test_remote_vllm.py b/tests/unit/providers/inference/test_remote_vllm.py index 958895cc4..0cf8ed306 100644 --- a/tests/unit/providers/inference/test_remote_vllm.py +++ b/tests/unit/providers/inference/test_remote_vllm.py @@ -40,7 +40,7 @@ from llama_stack_api import ( @pytest.fixture(scope="function") async def vllm_inference_adapter(): - config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345") + config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345") inference_adapter = VLLMInferenceAdapter(config=config) inference_adapter.model_store = AsyncMock() await inference_adapter.initialize() @@ -204,7 +204,7 @@ async def test_vllm_completion_extra_body(): via extra_body to the underlying OpenAI client through the InferenceRouter. """ # Set up the vLLM adapter - config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345") + config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345") vllm_adapter = VLLMInferenceAdapter(config=config) vllm_adapter.__provider_id__ = "vllm" await vllm_adapter.initialize() @@ -277,7 +277,7 @@ async def test_vllm_chat_completion_extra_body(): via extra_body to the underlying OpenAI client through the InferenceRouter for chat completion. 
""" # Set up the vLLM adapter - config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345") + config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345") vllm_adapter = VLLMInferenceAdapter(config=config) vllm_adapter.__provider_id__ = "vllm" await vllm_adapter.initialize() diff --git a/tests/unit/providers/nvidia/test_rerank_inference.py b/tests/unit/providers/nvidia/test_rerank_inference.py index ee62910b8..4ad9dc766 100644 --- a/tests/unit/providers/nvidia/test_rerank_inference.py +++ b/tests/unit/providers/nvidia/test_rerank_inference.py @@ -146,7 +146,7 @@ async def test_hosted_model_not_in_endpoint_mapping(): async def test_self_hosted_ignores_endpoint(): adapter = create_adapter( - config=NVIDIAConfig(url="http://localhost:8000", api_key=None), + config=NVIDIAConfig(base_url="http://localhost:8000", api_key=None), rerank_endpoints={"test-model": "https://model.endpoint/rerank"}, # This should be ignored for self-hosted. ) mock_session = MockSession(MockResponse()) diff --git a/tests/unit/providers/test_configs.py b/tests/unit/providers/test_configs.py index 867cfffbc..b4ba78394 100644 --- a/tests/unit/providers/test_configs.py +++ b/tests/unit/providers/test_configs.py @@ -4,8 +4,10 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
+from typing import get_args, get_origin + import pytest -from pydantic import BaseModel +from pydantic import BaseModel, HttpUrl from llama_stack.core.distribution import get_provider_registry, providable_apis from llama_stack.core.utils.dynamic import instantiate_class_type @@ -41,3 +43,55 @@ class TestProviderConfigurations: sample_config = config_type.sample_run_config(__distro_dir__="foobarbaz") assert isinstance(sample_config, dict), f"{config_class_name}.sample_run_config() did not return a dict" + + def test_remote_inference_url_standardization(self): + """Verify all remote inference providers use standardized base_url configuration.""" + provider_registry = get_provider_registry() + inference_providers = provider_registry.get("inference", {}) + + # Filter for remote providers only + remote_providers = {k: v for k, v in inference_providers.items() if k.startswith("remote::")} + + failures = [] + for provider_type, provider_spec in remote_providers.items(): + try: + config_class_name = provider_spec.config_class + config_type = instantiate_class_type(config_class_name) + + # Check that config has base_url field (not url) + if hasattr(config_type, "model_fields"): + fields = config_type.model_fields + + # Should NOT have 'url' field (old pattern) + if "url" in fields: + failures.append( + f"{provider_type}: Uses deprecated 'url' field instead of 'base_url'. " + f"Please rename to 'base_url' for consistency." + ) + + # Should have 'base_url' field with HttpUrl | None type + if "base_url" in fields: + field_info = fields["base_url"] + annotation = field_info.annotation + + # Check if it's HttpUrl or HttpUrl | None + # get_origin() returns Union for (X | Y), None for plain types + # get_args() returns the types inside Union, e.g. 
(HttpUrl, NoneType) + is_valid = False + if get_origin(annotation) is not None: # It's a Union/Optional + if HttpUrl in get_args(annotation): + is_valid = True + elif annotation == HttpUrl: # Plain HttpUrl without | None + is_valid = True + + if not is_valid: + failures.append( + f"{provider_type}: base_url field has incorrect type annotation. " + f"Expected 'HttpUrl | None', got '{annotation}'" + ) + + except Exception as e: + failures.append(f"{provider_type}: Error checking URL standardization: {str(e)}") + + if failures: + pytest.fail("URL standardization violations found:\n" + "\n".join(f" - {f}" for f in failures)) From 4e9633f7c35e00607e1c5e75b2e14fbc97cff6b8 Mon Sep 17 00:00:00 2001 From: Anik Date: Wed, 19 Nov 2025 13:04:24 -0500 Subject: [PATCH 02/14] feat: Make Safety API an optional dependency for meta-reference agents provider (#4169) # What does this PR do? Change Safety API from required to optional dependency, following the established pattern used for other optional dependencies in Llama Stack. The provider now starts successfully without Safety API configured. Requests that explicitly include guardrails will receive a clear error message when Safety API is unavailable. This enables local development and testing without Safety API while maintaining clear error messages when guardrail features are requested. Closes #4165 Signed-off-by: Anik Bhattacharjee ## Test Plan 1. New unit tests added in `tests/unit/providers/agents/meta_reference/test_safety_optional.py` 2. 
Integration tests performed with the files in https://gist.github.com/anik120/c33cef497ec7085e1fe2164e0705b8d6 (i) test with `test_integration_no_safety_fail.yaml`: Config WITHOUT Safety API, should fail with helpful error since `require_safety_api` is `true` by default ``` $ uv run llama stack run test_integration_no_safety_fail.yaml 2>&1 | grep -B 5 -A 15 "ValueError.*Safety\|Safety API is required" File "/Users/anbhatta/go/src/github.com/llamastack/llama-stack/src/llama_stack/providers/inline/agents/meta_reference /__init__.py", line 27, in get_provider_impl raise ValueError( ...<9 lines>... ) ValueError: Safety API is required but not configured. To run without safety checks, explicitly set in your configuration: providers: agents: - provider_id: meta-reference provider_type: inline::meta-reference config: require_safety_api: false Warning: This disables all safety guardrails for this agents provider. ``` (ii) test with `test_integration_no_safety_works.yaml` Config WITHOUT Safety API, **but** `require_safety_api=false` is explicitly set, should succeed ``` $ uv run llama stack run test_integration_no_safety_works.yaml INFO 2025-11-16 09:49:10,044 llama_stack.cli.stack.run:169 cli: Using run configuration: /Users/anbhatta/go/src/github.com/llamastack/llama-stack/test_integration_no_safety_works.yaml INFO 2025-11-16 09:49:10,052 llama_stack.cli.stack.run:228 cli: HTTPS enabled with certificates: Key: None Cert: None . . . INFO 2025-11-16 09:49:38,528 llama_stack.core.stack:495 core: starting registry refresh task INFO 2025-11-16 09:49:38,534 uvicorn.error:62 uncategorized: Application startup complete. 
INFO 2025-11-16 09:49:38,535 uvicorn.error:216 uncategorized: Uvicorn running on http://0.0.0.0:8321 (Press CTRL+C ``` Signed-off-by: Anik Bhattacharjee Signed-off-by: Anik Bhattacharjee --- .../inline/agents/meta_reference/__init__.py | 2 +- .../inline/agents/meta_reference/agents.py | 2 +- .../responses/openai_responses.py | 10 +- .../meta_reference/responses/streaming.py | 3 +- .../agents/meta_reference/responses/utils.py | 6 +- src/llama_stack/providers/registry/agents.py | 4 +- .../meta_reference/test_safety_optional.py | 206 ++++++++++++++++++ 7 files changed, 227 insertions(+), 6 deletions(-) create mode 100644 tests/unit/providers/agents/meta_reference/test_safety_optional.py diff --git a/src/llama_stack/providers/inline/agents/meta_reference/__init__.py b/src/llama_stack/providers/inline/agents/meta_reference/__init__.py index 91287617a..b3fb814e3 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/__init__.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/__init__.py @@ -23,7 +23,7 @@ async def get_provider_impl( config, deps[Api.inference], deps[Api.vector_io], - deps[Api.safety], + deps.get(Api.safety), deps[Api.tool_runtime], deps[Api.tool_groups], deps[Api.conversations], diff --git a/src/llama_stack/providers/inline/agents/meta_reference/agents.py b/src/llama_stack/providers/inline/agents/meta_reference/agents.py index ba83a9576..2d5aa6c04 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/agents.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/agents.py @@ -41,7 +41,7 @@ class MetaReferenceAgentsImpl(Agents): config: MetaReferenceAgentsImplConfig, inference_api: Inference, vector_io_api: VectorIO, - safety_api: Safety, + safety_api: Safety | None, tool_runtime_api: ToolRuntime, tool_groups_api: ToolGroups, conversations_api: Conversations, diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py 
b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py index 7e080a675..11bfb1417 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py @@ -67,7 +67,7 @@ class OpenAIResponsesImpl: tool_runtime_api: ToolRuntime, responses_store: ResponsesStore, vector_io_api: VectorIO, # VectorIO - safety_api: Safety, + safety_api: Safety | None, conversations_api: Conversations, ): self.inference_api = inference_api @@ -273,6 +273,14 @@ class OpenAIResponsesImpl: guardrail_ids = extract_guardrail_ids(guardrails) if guardrails else [] + # Validate that Safety API is available if guardrails are requested + if guardrail_ids and self.safety_api is None: + raise ValueError( + "Cannot process guardrails: Safety API is not configured.\n\n" + "To use guardrails, ensure the Safety API is configured in your stack, or remove " + "the 'guardrails' parameter from your request." 
+ ) + if conversation is not None: if previous_response_id is not None: raise ValueError( diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py index cdbd87244..0ef74f1f1 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py @@ -66,6 +66,7 @@ from llama_stack_api import ( OpenAIResponseUsage, OpenAIResponseUsageInputTokensDetails, OpenAIResponseUsageOutputTokensDetails, + Safety, WebSearchToolTypes, ) @@ -111,7 +112,7 @@ class StreamingResponseOrchestrator: max_infer_iters: int, tool_executor, # Will be the tool execution logic from the main class instructions: str | None, - safety_api, + safety_api: Safety | None, guardrail_ids: list[str] | None = None, prompt: OpenAIResponsePrompt | None = None, parallel_tool_calls: bool | None = None, diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py index 943bbae41..25460bcfe 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py @@ -320,11 +320,15 @@ def is_function_tool_call( return False -async def run_guardrails(safety_api: Safety, messages: str, guardrail_ids: list[str]) -> str | None: +async def run_guardrails(safety_api: Safety | None, messages: str, guardrail_ids: list[str]) -> str | None: """Run guardrails against messages and return violation message if blocked.""" if not messages: return None + # If safety API is not available, skip guardrails + if safety_api is None: + return None + # Look up shields to get their provider_resource_id (actual model ID) model_ids = [] # TODO: list_shields not in Safety interface but available at runtime via API routing diff --git 
a/src/llama_stack/providers/registry/agents.py b/src/llama_stack/providers/registry/agents.py index 2c68750a6..e85be99d6 100644 --- a/src/llama_stack/providers/registry/agents.py +++ b/src/llama_stack/providers/registry/agents.py @@ -30,12 +30,14 @@ def available_providers() -> list[ProviderSpec]: config_class="llama_stack.providers.inline.agents.meta_reference.MetaReferenceAgentsImplConfig", api_dependencies=[ Api.inference, - Api.safety, Api.vector_io, Api.tool_runtime, Api.tool_groups, Api.conversations, ], + optional_api_dependencies=[ + Api.safety, + ], description="Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks.", ), ] diff --git a/tests/unit/providers/agents/meta_reference/test_safety_optional.py b/tests/unit/providers/agents/meta_reference/test_safety_optional.py new file mode 100644 index 000000000..b48d38b29 --- /dev/null +++ b/tests/unit/providers/agents/meta_reference/test_safety_optional.py @@ -0,0 +1,206 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +"""Tests for making Safety API optional in meta-reference agents provider. + +This test suite validates the changes introduced to fix issue #4165, which +allows running the meta-reference agents provider without the Safety API. +Safety API is now an optional dependency, and errors are raised at request time +when guardrails are explicitly requested without Safety API configured. 
+""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from llama_stack.core.datatypes import Api +from llama_stack.core.storage.datatypes import KVStoreReference, ResponsesStoreReference +from llama_stack.providers.inline.agents.meta_reference import get_provider_impl +from llama_stack.providers.inline.agents.meta_reference.config import ( + AgentPersistenceConfig, + MetaReferenceAgentsImplConfig, +) +from llama_stack.providers.inline.agents.meta_reference.responses.utils import ( + run_guardrails, +) + + +@pytest.fixture +def mock_persistence_config(): + """Create a mock persistence configuration.""" + return AgentPersistenceConfig( + agent_state=KVStoreReference( + backend="kv_default", + namespace="agents", + ), + responses=ResponsesStoreReference( + backend="sql_default", + table_name="responses", + ), + ) + + +@pytest.fixture +def mock_deps(): + """Create mock dependencies for the agents provider.""" + # Create mock APIs + inference_api = AsyncMock() + vector_io_api = AsyncMock() + tool_runtime_api = AsyncMock() + tool_groups_api = AsyncMock() + conversations_api = AsyncMock() + + return { + Api.inference: inference_api, + Api.vector_io: vector_io_api, + Api.tool_runtime: tool_runtime_api, + Api.tool_groups: tool_groups_api, + Api.conversations: conversations_api, + } + + +class TestProviderInitialization: + """Test provider initialization with different safety API configurations.""" + + async def test_initialization_with_safety_api_present(self, mock_persistence_config, mock_deps): + """Test successful initialization when Safety API is configured.""" + config = MetaReferenceAgentsImplConfig(persistence=mock_persistence_config) + + # Add safety API to deps + safety_api = AsyncMock() + mock_deps[Api.safety] = safety_api + + # Mock the initialize method to avoid actual initialization + with patch( + "llama_stack.providers.inline.agents.meta_reference.agents.MetaReferenceAgentsImpl.initialize", + new_callable=AsyncMock, + ): + # Should 
not raise any exception + provider = await get_provider_impl(config, mock_deps, policy=[], telemetry_enabled=False) + assert provider is not None + + async def test_initialization_without_safety_api(self, mock_persistence_config, mock_deps): + """Test successful initialization when Safety API is not configured.""" + config = MetaReferenceAgentsImplConfig(persistence=mock_persistence_config) + + # Safety API is NOT in mock_deps - provider should still start + # Mock the initialize method to avoid actual initialization + with patch( + "llama_stack.providers.inline.agents.meta_reference.agents.MetaReferenceAgentsImpl.initialize", + new_callable=AsyncMock, + ): + # Should not raise any exception + provider = await get_provider_impl(config, mock_deps, policy=[], telemetry_enabled=False) + assert provider is not None + assert provider.safety_api is None + + +class TestGuardrailsFunctionality: + """Test run_guardrails function with optional safety API.""" + + async def test_run_guardrails_with_none_safety_api(self): + """Test that run_guardrails returns None when safety_api is None.""" + result = await run_guardrails(safety_api=None, messages="test message", guardrail_ids=["llama-guard"]) + assert result is None + + async def test_run_guardrails_with_empty_messages(self): + """Test that run_guardrails returns None for empty messages.""" + # Test with None safety API + result = await run_guardrails(safety_api=None, messages="", guardrail_ids=["llama-guard"]) + assert result is None + + # Test with mock safety API + mock_safety_api = AsyncMock() + result = await run_guardrails(safety_api=mock_safety_api, messages="", guardrail_ids=["llama-guard"]) + assert result is None + + async def test_run_guardrails_with_none_safety_api_ignores_guardrails(self): + """Test that guardrails are skipped when safety_api is None, even if guardrail_ids are provided.""" + # Should not raise exception, just return None + result = await run_guardrails( + safety_api=None, + messages="potentially 
harmful content", + guardrail_ids=["llama-guard", "content-filter"], + ) + assert result is None + + async def test_create_response_rejects_guardrails_without_safety_api(self, mock_persistence_config, mock_deps): + """Test that create_openai_response raises error when guardrails requested but Safety API unavailable.""" + from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import ( + OpenAIResponsesImpl, + ) + from llama_stack_api import ResponseGuardrailSpec + + # Create OpenAIResponsesImpl with no safety API + with patch("llama_stack.providers.inline.agents.meta_reference.responses.openai_responses.ResponsesStore"): + impl = OpenAIResponsesImpl( + inference_api=mock_deps[Api.inference], + tool_groups_api=mock_deps[Api.tool_groups], + tool_runtime_api=mock_deps[Api.tool_runtime], + responses_store=MagicMock(), + vector_io_api=mock_deps[Api.vector_io], + safety_api=None, # No Safety API + conversations_api=mock_deps[Api.conversations], + ) + + # Test with string guardrail + with pytest.raises(ValueError) as exc_info: + await impl.create_openai_response( + input="test input", + model="test-model", + guardrails=["llama-guard"], + ) + assert "Cannot process guardrails: Safety API is not configured" in str(exc_info.value) + + # Test with ResponseGuardrailSpec + with pytest.raises(ValueError) as exc_info: + await impl.create_openai_response( + input="test input", + model="test-model", + guardrails=[ResponseGuardrailSpec(type="llama-guard")], + ) + assert "Cannot process guardrails: Safety API is not configured" in str(exc_info.value) + + async def test_create_response_succeeds_without_guardrails_and_no_safety_api( + self, mock_persistence_config, mock_deps + ): + """Test that create_openai_response works when no guardrails requested and Safety API unavailable.""" + from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import ( + OpenAIResponsesImpl, + ) + + # Create OpenAIResponsesImpl with no safety API + with 
( + patch("llama_stack.providers.inline.agents.meta_reference.responses.openai_responses.ResponsesStore"), + patch.object(OpenAIResponsesImpl, "_create_streaming_response", new_callable=AsyncMock) as mock_stream, + ): + # Mock the streaming response to return a simple async generator + async def mock_generator(): + yield MagicMock() + + mock_stream.return_value = mock_generator() + + impl = OpenAIResponsesImpl( + inference_api=mock_deps[Api.inference], + tool_groups_api=mock_deps[Api.tool_groups], + tool_runtime_api=mock_deps[Api.tool_runtime], + responses_store=MagicMock(), + vector_io_api=mock_deps[Api.vector_io], + safety_api=None, # No Safety API + conversations_api=mock_deps[Api.conversations], + ) + + # Should not raise when no guardrails requested + # Note: This will still fail later in execution due to mocking, but should pass the validation + try: + await impl.create_openai_response( + input="test input", + model="test-model", + guardrails=None, # No guardrails + ) + except Exception as e: + # Ensure the error is NOT about missing Safety API + assert "Cannot process guardrails: Safety API is not configured" not in str(e) From 40b11efac44c9ac7fcc56d4f1f93f0d92f45f8c2 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 19 Nov 2025 10:07:53 -0800 Subject: [PATCH 03/14] feat(tests): add TypeScript client integration test support (#4185) Integration tests can now validate the TypeScript SDK alongside Python tests when running against server-mode stacks. Currently, this only adds a _small_ number of tests. We should extend only if truly needed -- this smoke check may be sufficient. When `RUN_CLIENT_TS_TESTS=1` is set, the test script runs TypeScript tests after Python tests pass. Tests are mapped via `tests/integration/client-typescript/suites.json` which defines which TypeScript test files correspond to each Python suite/setup combination. 
The fact that we need exact "test_id"s (which are actually generated by pytest) to be hardcoded inside the Typescript tests (so we hit the recorded paths) is a big smell and it might become grating, but maybe the benefit is worth it if we keep this test suite _small_ and targeted. ## Test Plan Run with TypeScript tests enabled: ```bash OPENAI_API_KEY=dummy RUN_CLIENT_TS_TESTS=1 \ scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt ``` --- .../setup-typescript-client/action.yml | 35 + .github/workflows/integration-tests.yml | 16 + .gitignore | 2 + scripts/get_setup_env.py | 24 +- scripts/integration-tests.sh | 76 + tests/integration/README.md | 20 + .../__tests__/inference.test.ts | 104 + .../__tests__/responses.test.ts | 132 + .../jest.integration.config.js | 31 + .../client-typescript/package-lock.json | 5507 +++++++++++++++++ .../client-typescript/package.json | 18 + .../client-typescript/run-tests.js | 63 + tests/integration/client-typescript/setup.ts | 162 + .../integration/client-typescript/suites.json | 12 + .../client-typescript/tsconfig.json | 16 + 15 files changed, 6208 insertions(+), 10 deletions(-) create mode 100644 .github/actions/setup-typescript-client/action.yml create mode 100644 tests/integration/client-typescript/__tests__/inference.test.ts create mode 100644 tests/integration/client-typescript/__tests__/responses.test.ts create mode 100644 tests/integration/client-typescript/jest.integration.config.js create mode 100644 tests/integration/client-typescript/package-lock.json create mode 100644 tests/integration/client-typescript/package.json create mode 100755 tests/integration/client-typescript/run-tests.js create mode 100644 tests/integration/client-typescript/setup.ts create mode 100644 tests/integration/client-typescript/suites.json create mode 100644 tests/integration/client-typescript/tsconfig.json diff --git a/.github/actions/setup-typescript-client/action.yml 
b/.github/actions/setup-typescript-client/action.yml new file mode 100644 index 000000000..8b78ba70c --- /dev/null +++ b/.github/actions/setup-typescript-client/action.yml @@ -0,0 +1,35 @@ +name: Setup TypeScript client +description: Conditionally checkout and link llama-stack-client-typescript based on client-version +inputs: + client-version: + description: 'Client version (latest or published)' + required: true + +outputs: + ts-client-path: + description: 'Path or version to use for TypeScript client' + value: ${{ steps.set-path.outputs.ts-client-path }} + +runs: + using: "composite" + steps: + - name: Checkout TypeScript client (latest) + if: ${{ inputs.client-version == 'latest' }} + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + repository: llamastack/llama-stack-client-typescript + ref: main + path: .ts-client-checkout + + - name: Set TS_CLIENT_PATH + id: set-path + shell: bash + run: | + if [ "${{ inputs.client-version }}" = "latest" ]; then + echo "ts-client-path=${{ github.workspace }}/.ts-client-checkout" >> $GITHUB_OUTPUT + elif [ "${{ inputs.client-version }}" = "published" ]; then + echo "ts-client-path=^0.3.2" >> $GITHUB_OUTPUT + else + echo "::error::Invalid client-version: ${{ inputs.client-version }}" + exit 1 + fi diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 71c7933b4..8073f6a15 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -93,11 +93,27 @@ jobs: suite: ${{ matrix.config.suite }} inference-mode: 'replay' + - name: Setup Node.js for TypeScript client tests + if: ${{ matrix.client == 'server' }} + uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: tests/integration/client-typescript/package-lock.json + + - name: Setup TypeScript client + if: ${{ matrix.client == 'server' }} + id: setup-ts-client + uses: 
./.github/actions/setup-typescript-client + with: + client-version: ${{ matrix.client-version }} + - name: Run tests if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }} uses: ./.github/actions/run-and-record-tests env: OPENAI_API_KEY: dummy + TS_CLIENT_PATH: ${{ steps.setup-ts-client.outputs.ts-client-path || '' }} with: stack-config: >- ${{ matrix.config.stack_config diff --git a/.gitignore b/.gitignore index f5ca450b2..0d8fd5a2f 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,5 @@ docs/static/imported-files/ docs/docs/api-deprecated/ docs/docs/api-experimental/ docs/docs/api/ +tests/integration/client-typescript/node_modules/ +.ts-client-checkout/ diff --git a/scripts/get_setup_env.py b/scripts/get_setup_env.py index fad601e76..755cfefea 100755 --- a/scripts/get_setup_env.py +++ b/scripts/get_setup_env.py @@ -16,16 +16,16 @@ import sys from tests.integration.suites import SETUP_DEFINITIONS, SUITE_DEFINITIONS -def get_setup_env_vars(setup_name, suite_name=None): +def get_setup_config(setup_name, suite_name=None): """ - Get environment variables for a setup, with optional suite default fallback. + Get full configuration (env vars + defaults) for a setup. 
Args: setup_name: Name of the setup (e.g., 'ollama', 'gpt') suite_name: Optional suite name to get default setup if setup_name is None Returns: - Dictionary of environment variables + Dictionary with 'env' and 'defaults' keys """ # If no setup specified, try to get default from suite if not setup_name and suite_name: @@ -34,7 +34,7 @@ def get_setup_env_vars(setup_name, suite_name=None): setup_name = suite.default_setup if not setup_name: - return {} + return {"env": {}, "defaults": {}} setup = SETUP_DEFINITIONS.get(setup_name) if not setup: @@ -44,27 +44,31 @@ def get_setup_env_vars(setup_name, suite_name=None): ) sys.exit(1) - return setup.env + return {"env": setup.env, "defaults": setup.defaults} def main(): - parser = argparse.ArgumentParser(description="Extract environment variables from a test setup") + parser = argparse.ArgumentParser(description="Extract environment variables and defaults from a test setup") parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)") parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided") parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)") args = parser.parse_args() - env_vars = get_setup_env_vars(args.setup, args.suite) + config = get_setup_config(args.setup, args.suite) if args.format == "bash": - # Output as bash export statements - for key, value in env_vars.items(): + # Output env vars as bash export statements + for key, value in config["env"].items(): print(f"export {key}='{value}'") + # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix + for key, value in config["defaults"].items(): + env_key = f"LLAMA_STACK_TEST_{key.upper()}" + print(f"export {env_key}='{value}'") elif args.format == "json": import json - print(json.dumps(env_vars)) + print(json.dumps(config)) if __name__ == "__main__": diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh index 
8b0002125..20ecd0c4d 100755 --- a/scripts/integration-tests.sh +++ b/scripts/integration-tests.sh @@ -181,6 +181,10 @@ echo "$SETUP_ENV" eval "$SETUP_ENV" echo "" +# Export suite and setup names for TypeScript tests +export LLAMA_STACK_TEST_SUITE="$TEST_SUITE" +export LLAMA_STACK_TEST_SETUP="$TEST_SETUP" + ROOT_DIR="$THIS_DIR/.." cd $ROOT_DIR @@ -212,6 +216,71 @@ find_available_port() { return 1 } +run_client_ts_tests() { + if ! command -v npm &>/dev/null; then + echo "npm could not be found; ensure Node.js is installed" + return 1 + fi + + pushd tests/integration/client-typescript >/dev/null + + # Determine if TS_CLIENT_PATH is a directory path or an npm version + if [[ -d "$TS_CLIENT_PATH" ]]; then + # It's a directory path - use local checkout + if [[ ! -f "$TS_CLIENT_PATH/package.json" ]]; then + echo "Error: $TS_CLIENT_PATH exists but doesn't look like llama-stack-client-typescript (no package.json)" + popd >/dev/null + return 1 + fi + echo "Using local llama-stack-client-typescript from: $TS_CLIENT_PATH" + + # Build the TypeScript client first + echo "Building TypeScript client..." + pushd "$TS_CLIENT_PATH" >/dev/null + npm install --silent + npm run build --silent + popd >/dev/null + + # Install other dependencies first + if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then + npm ci --silent + else + npm install --silent + fi + + # Then install the client from local directory + echo "Installing llama-stack-client from: $TS_CLIENT_PATH" + npm install "$TS_CLIENT_PATH" --silent + else + # It's an npm version specifier - install from npm + echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm" + if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then + npm ci --silent + npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent + else + npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent + fi + fi + + # Verify installation + echo "Verifying llama-stack-client installation..." 
+ if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then + echo "✅ llama-stack-client successfully installed" + npm list llama-stack-client + else + echo "❌ llama-stack-client not found in node_modules" + echo "Installed packages:" + npm list --depth=0 + popd >/dev/null + return 1 + fi + + echo "Running TypeScript tests for suite $TEST_SUITE (setup $TEST_SETUP)" + npm test + + popd >/dev/null +} + # Start Llama Stack Server if needed if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then # Find an available port for the server @@ -221,6 +290,7 @@ if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then exit 1 fi export LLAMA_STACK_PORT + export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT" echo "Will use port: $LLAMA_STACK_PORT" stop_server() { @@ -298,6 +368,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then exit 1 fi export LLAMA_STACK_PORT + export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT" echo "Will use port: $LLAMA_STACK_PORT" echo "=== Building Docker Image for distribution: $DISTRO ===" @@ -506,5 +577,10 @@ else exit 1 fi +# Run TypeScript client tests if TS_CLIENT_PATH is set +if [[ $exit_code -eq 0 && -n "${TS_CLIENT_PATH:-}" && "${LLAMA_STACK_TEST_STACK_CONFIG_TYPE:-}" == "server" ]]; then + run_client_ts_tests +fi + echo "" echo "=== Integration Tests Complete ===" diff --git a/tests/integration/README.md b/tests/integration/README.md index f581073ae..3559b785c 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -211,3 +211,23 @@ def test_asymmetric_embeddings(llama_stack_client, embedding_model_id): assert query_response.embeddings is not None ``` + +## TypeScript Client Replays + +TypeScript SDK tests can run alongside Python tests when testing against `server:` stacks. 
Set `TS_CLIENT_PATH` to the path or version of `llama-stack-client-typescript` to enable: + +```bash +# Use published npm package (responses suite) +TS_CLIENT_PATH=^0.3.2 scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt + +# Use local checkout from ~/.cache (recommended for development) +git clone https://github.com/llamastack/llama-stack-client-typescript.git ~/.cache/llama-stack-client-typescript +TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt + +# Run base suite with TypeScript tests +TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup ollama +``` + +TypeScript tests run immediately after Python tests pass, using the same replay fixtures. The mapping between Python suites/setups and TypeScript test files is defined in `tests/integration/client-typescript/suites.json`. + +If `TS_CLIENT_PATH` is unset, TypeScript tests are skipped entirely. diff --git a/tests/integration/client-typescript/__tests__/inference.test.ts b/tests/integration/client-typescript/__tests__/inference.test.ts new file mode 100644 index 000000000..b0734fed7 --- /dev/null +++ b/tests/integration/client-typescript/__tests__/inference.test.ts @@ -0,0 +1,104 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. +// +// This source code is licensed under the terms described in the LICENSE file in +// the root directory of this source tree. + +/** + * Integration tests for Inference API (Chat Completions). + * Ported from: llama-stack/tests/integration/inference/test_openai_completion.py + * + * IMPORTANT: Test cases must match EXACTLY with Python tests to use recorded API responses. 
+ */ + +import { createTestClient, requireTextModel } from '../setup'; + +describe('Inference API - Chat Completions', () => { + // Test cases matching llama-stack/tests/integration/test_cases/inference/chat_completion.json + const chatCompletionTestCases = [ + { + id: 'non_streaming_01', + question: 'Which planet do humans live on?', + expected: 'earth', + testId: + 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_01]', + }, + { + id: 'non_streaming_02', + question: 'Which planet has rings around it with a name starting with letter S?', + expected: 'saturn', + testId: + 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_02]', + }, + ]; + + const streamingTestCases = [ + { + id: 'streaming_01', + question: "What's the name of the Sun in latin?", + expected: 'sol', + testId: + 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_01]', + }, + { + id: 'streaming_02', + question: 'What is the name of the US captial?', + expected: 'washington', + testId: + 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_02]', + }, + ]; + + test.each(chatCompletionTestCases)( + 'chat completion non-streaming: $id', + async ({ question, expected, testId }) => { + const client = createTestClient(testId); + const textModel = requireTextModel(); + + const response = await client.chat.completions.create({ + model: textModel, + messages: [ + { + role: 'user', + content: question, + }, + ], + stream: false, + }); + + // Non-streaming responses 
have choices with message property + const choice = response.choices[0]; + expect(choice).toBeDefined(); + if (!choice || !('message' in choice)) { + throw new Error('Expected non-streaming response with message'); + } + const content = choice.message.content; + expect(content).toBeDefined(); + const messageContent = typeof content === 'string' ? content.toLowerCase().trim() : ''; + expect(messageContent.length).toBeGreaterThan(0); + expect(messageContent).toContain(expected.toLowerCase()); + }, + ); + + test.each(streamingTestCases)('chat completion streaming: $id', async ({ question, expected, testId }) => { + const client = createTestClient(testId); + const textModel = requireTextModel(); + + const stream = await client.chat.completions.create({ + model: textModel, + messages: [{ role: 'user', content: question }], + stream: true, + }); + + const streamedContent: string[] = []; + for await (const chunk of stream) { + if (chunk.choices && chunk.choices.length > 0 && chunk.choices[0]?.delta?.content) { + streamedContent.push(chunk.choices[0].delta.content); + } + } + + expect(streamedContent.length).toBeGreaterThan(0); + const fullContent = streamedContent.join('').toLowerCase().trim(); + expect(fullContent).toContain(expected.toLowerCase()); + }); +}); diff --git a/tests/integration/client-typescript/__tests__/responses.test.ts b/tests/integration/client-typescript/__tests__/responses.test.ts new file mode 100644 index 000000000..0fc2a3245 --- /dev/null +++ b/tests/integration/client-typescript/__tests__/responses.test.ts @@ -0,0 +1,132 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. +// +// This source code is licensed under the terms described in the LICENSE file in +// the root directory of this source tree. + +/** + * Integration tests for Responses API. 
+ * Ported from: llama-stack/tests/integration/responses/test_basic_responses.py + * + * IMPORTANT: Test cases and IDs must match EXACTLY with Python tests to use recorded API responses. + */ + +import { createTestClient, requireTextModel, getResponseOutputText } from '../setup'; + +describe('Responses API - Basic', () => { + // Test cases matching llama-stack/tests/integration/responses/fixtures/test_cases.py + const basicTestCases = [ + { + id: 'earth', + input: 'Which planet do humans live on?', + expected: 'earth', + // Use client_with_models fixture to match non-streaming recordings + testId: + 'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-earth]', + }, + { + id: 'saturn', + input: 'Which planet has rings around it with a name starting with letter S?', + expected: 'saturn', + testId: + 'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-saturn]', + }, + ]; + + test.each(basicTestCases)('non-streaming basic response: $id', async ({ input, expected, testId }) => { + // Create client with test_id for all requests + const client = createTestClient(testId); + const textModel = requireTextModel(); + + // Create a response + const response = await client.responses.create({ + model: textModel, + input, + stream: false, + }); + + // Verify response has content + const outputText = getResponseOutputText(response).toLowerCase().trim(); + expect(outputText.length).toBeGreaterThan(0); + expect(outputText).toContain(expected.toLowerCase()); + + // Verify usage is reported + expect(response.usage).toBeDefined(); + expect(response.usage!.input_tokens).toBeGreaterThan(0); + expect(response.usage!.output_tokens).toBeGreaterThan(0); + expect(response.usage!.total_tokens).toBe(response.usage!.input_tokens + response.usage!.output_tokens); + + // Verify stored response matches + const retrievedResponse = await 
client.responses.retrieve(response.id); + expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(response)); + + // Test follow-up with previous_response_id + const nextResponse = await client.responses.create({ + model: textModel, + input: 'Repeat your previous response in all caps.', + previous_response_id: response.id, + }); + const nextOutputText = getResponseOutputText(nextResponse).trim(); + expect(nextOutputText).toContain(expected.toUpperCase()); + }); + + test.each(basicTestCases)('streaming basic response: $id', async ({ input, expected, testId }) => { + // Modify test_id for streaming variant + const streamingTestId = testId.replace( + 'test_response_non_streaming_basic', + 'test_response_streaming_basic', + ); + const client = createTestClient(streamingTestId); + const textModel = requireTextModel(); + + // Create a streaming response + const stream = await client.responses.create({ + model: textModel, + input, + stream: true, + }); + + const events: any[] = []; + let responseId = ''; + + for await (const chunk of stream) { + events.push(chunk); + + if (chunk.type === 'response.created') { + // Verify response.created is the first event + expect(events.length).toBe(1); + expect(chunk.response.status).toBe('in_progress'); + responseId = chunk.response.id; + } else if (chunk.type === 'response.completed') { + // Verify response.completed comes after response.created + expect(events.length).toBeGreaterThanOrEqual(2); + expect(chunk.response.status).toBe('completed'); + expect(chunk.response.id).toBe(responseId); + + // Verify content quality + const outputText = getResponseOutputText(chunk.response).toLowerCase().trim(); + expect(outputText.length).toBeGreaterThan(0); + expect(outputText).toContain(expected.toLowerCase()); + + // Verify usage is reported + expect(chunk.response.usage).toBeDefined(); + expect(chunk.response.usage!.input_tokens).toBeGreaterThan(0); + expect(chunk.response.usage!.output_tokens).toBeGreaterThan(0); + 
expect(chunk.response.usage!.total_tokens).toBe( + chunk.response.usage!.input_tokens + chunk.response.usage!.output_tokens, + ); + } + } + + // Verify we got both events + expect(events.length).toBeGreaterThanOrEqual(2); + const firstEvent = events[0]; + const lastEvent = events[events.length - 1]; + expect(firstEvent.type).toBe('response.created'); + expect(lastEvent.type).toBe('response.completed'); + + // Verify stored response matches streamed response + const retrievedResponse = await client.responses.retrieve(responseId); + expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(lastEvent.response)); + }); +}); diff --git a/tests/integration/client-typescript/jest.integration.config.js b/tests/integration/client-typescript/jest.integration.config.js new file mode 100644 index 000000000..769bd177a --- /dev/null +++ b/tests/integration/client-typescript/jest.integration.config.js @@ -0,0 +1,31 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. +// +// This source code is licensed under the terms described in the LICENSE file in +// the root directory of this source tree. 
+ +/** @type {import('ts-jest').JestConfigWithTsJest} */ +module.exports = { + preset: 'ts-jest/presets/default-esm', + testEnvironment: 'node', + extensionsToTreatAsEsm: ['.ts'], + moduleNameMapper: { + '^(\\.{1,2}/.*)\\.js$': '$1', + }, + transform: { + '^.+\\.tsx?$': [ + 'ts-jest', + { + useESM: true, + tsconfig: { + module: 'ES2022', + moduleResolution: 'bundler', + }, + }, + ], + }, + testMatch: ['/__tests__/**/*.test.ts'], + setupFilesAfterEnv: ['/setup.ts'], + testTimeout: 60000, // 60 seconds (integration tests can be slow) + watchman: false, // Disable watchman to avoid permission issues +}; diff --git a/tests/integration/client-typescript/package-lock.json b/tests/integration/client-typescript/package-lock.json new file mode 100644 index 000000000..f118a07e3 --- /dev/null +++ b/tests/integration/client-typescript/package-lock.json @@ -0,0 +1,5507 @@ +{ + "name": "llama-stack-typescript-integration-tests", + "version": "0.0.1", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "llama-stack-typescript-integration-tests", + "version": "0.0.1", + "dependencies": { + "llama-stack-client": "^0.3.2" + }, + "devDependencies": { + "@swc/core": "^1.3.102", + "@swc/jest": "^0.2.29", + "@types/jest": "^29.4.0", + "@types/node": "^20.0.0", + "jest": "^29.4.0", + "ts-jest": "^29.1.0", + "typescript": "^5.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz", + "integrity": 
"sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", + "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + 
} + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" 
+ }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + 
"engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/console/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/console/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/console/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": 
"^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/core/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/core/node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": 
"sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/create-cache-key-function": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/@jest/create-cache-key-function/-/create-cache-key-function-30.2.0.tgz", + "integrity": "sha512-44F4l4Enf+MirJN8X/NhdGkl71k5rBYiwdVlo4HxOwbu0sHV8QKrGEedb1VUU4K3W7fBKE0HGfbn7eZm0Ti3zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "30.2.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/environment/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/environment/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + 
"@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/environment/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/@jest/fake-timers/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": 
"https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/pattern": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/@jest/pattern/-/pattern-30.0.1.tgz", + "integrity": "sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-regex-util": "30.0.1" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "license": "MIT", + "dependencies": 
{ + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/@jest/reporters/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/schemas": { + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.34.0" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": 
"sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jest/transform/node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": 
"https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "30.2.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.2.0.tgz", + "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/pattern": "30.0.1", + "@jest/schemas": "30.0.5", + "@types/istanbul-lib-coverage": "^2.0.6", + "@types/istanbul-reports": "^3.0.4", + "@types/node": "*", + "@types/yargs": "^17.0.33", + "chalk": "^4.1.2" + }, + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.34.41", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", + "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@swc/core": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.15.2.tgz", + "integrity": 
"sha512-OQm+yJdXxvSjqGeaWhP6Ia264ogifwAO7Q12uTDVYj/Ks4jBTI4JknlcjDRAXtRhqbWsfbZyK/5RtuIPyptk3w==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "@swc/types": "^0.1.25" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.15.2", + "@swc/core-darwin-x64": "1.15.2", + "@swc/core-linux-arm-gnueabihf": "1.15.2", + "@swc/core-linux-arm64-gnu": "1.15.2", + "@swc/core-linux-arm64-musl": "1.15.2", + "@swc/core-linux-x64-gnu": "1.15.2", + "@swc/core-linux-x64-musl": "1.15.2", + "@swc/core-win32-arm64-msvc": "1.15.2", + "@swc/core-win32-ia32-msvc": "1.15.2", + "@swc/core-win32-x64-msvc": "1.15.2" + }, + "peerDependencies": { + "@swc/helpers": ">=0.5.17" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.15.2.tgz", + "integrity": "sha512-Ghyz4RJv4zyXzrUC1B2MLQBbppIB5c4jMZJybX2ebdEQAvryEKp3gq1kBksCNsatKGmEgXul88SETU19sMWcrw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-darwin-x64": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.15.2.tgz", + "integrity": "sha512-7n/PGJOcL2QoptzL42L5xFFfXY5rFxLHnuz1foU+4ruUTG8x2IebGhtwVTpaDN8ShEv2UZObBlT1rrXTba15Zw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.15.2", + "resolved": 
"https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.15.2.tgz", + "integrity": "sha512-ZUQVCfRJ9wimuxkStRSlLwqX4TEDmv6/J+E6FicGkQ6ssLMWoKDy0cAo93HiWt/TWEee5vFhFaSQYzCuBEGO6A==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.15.2.tgz", + "integrity": "sha512-GZh3pYBmfnpQ+JIg+TqLuz+pM+Mjsk5VOzi8nwKn/m+GvQBsxD5ectRtxuWUxMGNG8h0lMy4SnHRqdK3/iJl7A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.15.2.tgz", + "integrity": "sha512-5av6VYZZeneiYIodwzGMlnyVakpuYZryGzFIbgu1XP8wVylZxduEzup4eP8atiMDFmIm+s4wn8GySJmYqeJC0A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.15.2.tgz", + "integrity": "sha512-1nO/UfdCLuT/uE/7oB3EZgTeZDCIa6nL72cFEpdegnqpJVNDI6Qb8U4g/4lfVPkmHq2lvxQ0L+n+JdgaZLhrRA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.15.2.tgz", + "integrity": "sha512-Ksfrb0Tx310kr+TLiUOvB/I80lyZ3lSOp6cM18zmNRT/92NB4mW8oX2Jo7K4eVEI2JWyaQUAFubDSha2Q+439A==", + "cpu": 
[ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.15.2.tgz", + "integrity": "sha512-IzUb5RlMUY0r1A9IuJrQ7Tbts1wWb73/zXVXT8VhewbHGoNlBKE0qUhKMED6Tv4wDF+pmbtUJmKXDthytAvLmg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.15.2.tgz", + "integrity": "sha512-kCATEzuY2LP9AlbU2uScjcVhgnCAkRdu62vbce17Ro5kxEHxYWcugkveyBRS3AqZGtwAKYbMAuNloer9LS/hpw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.15.2.tgz", + "integrity": "sha512-iJaHeYCF4jTn7OEKSa3KRiuVFIVYts8jYjNmCdyz1u5g8HRyTDISD76r8+ljEOgm36oviRQvcXaw6LFp1m0yyA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/@swc/jest": { + "version": "0.2.39", + "resolved": "https://registry.npmjs.org/@swc/jest/-/jest-0.2.39.tgz", + "integrity": 
"sha512-eyokjOwYd0Q8RnMHri+8/FS1HIrIUKK/sRrFp8c1dThUOfNeCWbLmBP1P5VsKdvmkd25JaH+OKYwEYiAYg9YAA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/create-cache-key-function": "^30.0.0", + "@swc/counter": "^0.1.3", + "jsonc-parser": "^3.2.0" + }, + "engines": { + "npm": ">= 7.0.0" + }, + "peerDependencies": { + "@swc/core": "*" + } + }, + "node_modules/@swc/types": { + "version": "0.1.25", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.25.tgz", + "integrity": "sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": 
"7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", 
+ "dev": true, + "license": "MIT", + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/node": { + "version": "20.19.25", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.25.tgz", + "integrity": "sha512-ZsJzA5thDQMSQO788d7IocwwQbI8B5OPzmqNvpf3NY/+MHDAS759Wo0gd2WQeXYt5AAAQjzcrTVC6SKCuYgoCQ==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/node-fetch": { + "version": "2.6.13", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz", + "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.4" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": "17.0.35", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz", + "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": 
"sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/agentkeepalive": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", + "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": 
"sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + 
"test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + 
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.8.29", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.29.tgz", + "integrity": "sha512-sXdt2elaVnhpDNRDz+1BDx1JQoJRuNk7oVlAlbGiFkLikHCAQiccexF/9e91zVi6RCgqspl04aP+6Cnl9zRLrA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz", + "integrity": "sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.8.25", + "caniuse-lite": "^1.0.30001754", + "electron-to-chromium": "^1.5.249", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.1.4" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "license": "Apache-2.0", + 
"dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001755", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001755.tgz", + "integrity": "sha512-44V+Jm6ctPj7R52Na4TLi3Zri4dWUljJd+RDm+j8LtNCc/ihLCT+X1TzoOAkRETEWqjuLnh9581Tl80FvK7jVA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + 
"node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": 
"https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz", + "integrity": "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-jest/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-jest/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-jest/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": 
"https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": 
"sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.255", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.255.tgz", + "integrity": "sha512-Z9oIp4HrFF/cZkDPMpz2XSuVpc1THDpT4dlmATFlJUIBVCy9Vap5/rIXsASP1CscBacBqhabwh8vLctqBwEerQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + 
"node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + 
"has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + 
"strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": 
">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/form-data-encoder": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", + "license": "MIT" + }, + "node_modules/formdata-node": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", + "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "license": "MIT", + "dependencies": { + "node-domexception": "1.0.0", + "web-streams-polyfill": "4.0.0-beta.3" + }, + "engines": { + "node": ">= 12.20" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + 
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } 
+ }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + 
"node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + 
"funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": 
"https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { 
+ "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + 
"integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + 
"jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-cli/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-config/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-config/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + 
"@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-config/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-config/node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", 
+ "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-haste-map/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/jest-haste-map/node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || 
^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + 
"integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "30.0.1", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-30.0.1.tgz", + "integrity": "sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || 
>=24.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies/node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + 
"@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/jest-runtime/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-runtime/node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/jest-util/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": 
"https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": 
"^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + 
"supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jest/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest/node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonc-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.3.1.tgz", + "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": 
"https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/llama-stack-client": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/llama-stack-client/-/llama-stack-client-0.3.2.tgz", + "integrity": "sha512-vzcnIN6k3sp7dhMXSnyrPSd82ACH/H3snj2uF6DgZwZCacKQNp2Y5XIT5qZZgoM1EUXbaxdVYFCeWD9yNCwatw==", + "license": "MIT", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + } + }, + "node_modules/llama-stack-client/node_modules/@types/node": { + "version": "18.19.130", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz", + "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==", + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/llama-stack-client/node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": 
"sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": 
"https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + 
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": 
"sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "deprecated": "Use your platform's native DOMException instead", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": 
"sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", 
+ "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + 
} + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": 
"^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "license": "MIT" + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": 
"sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" 
+ } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-regex-range": { 
+ "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/ts-jest": { + "version": "29.4.5", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.5.tgz", + "integrity": "sha512-HO3GyiWn2qvTQA4kTgjDcXiMwYQt68a1Y8+JuLRVpdIzm+UOLSHgl/XqR4c6nzJkq5rOkjc02O2I7P7l/Yof0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.3", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": 
"sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": 
"sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "license": "MIT" + }, + "node_modules/update-browserslist-db": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz", + "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": 
"sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/web-streams-polyfill": { + "version": "4.0.0-beta.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", + "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", 
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": 
"^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/tests/integration/client-typescript/package.json b/tests/integration/client-typescript/package.json new file mode 100644 index 000000000..e5fe1b8f5 --- /dev/null +++ b/tests/integration/client-typescript/package.json @@ -0,0 +1,18 @@ +{ + "name": "llama-stack-typescript-integration-tests", + "version": "0.0.1", + "private": true, + "description": "TypeScript client integration tests for Llama Stack", + "scripts": { + "test": "node run-tests.js" + }, + "devDependencies": { + "@swc/core": "^1.3.102", + "@swc/jest": "^0.2.29", + "@types/jest": "^29.4.0", + "@types/node": "^20.0.0", + "jest": "^29.4.0", + "ts-jest": "^29.1.0", + "typescript": "^5.0.0" + } +} diff --git a/tests/integration/client-typescript/run-tests.js b/tests/integration/client-typescript/run-tests.js new file mode 100755 index 000000000..93df5d8a0 --- /dev/null +++ b/tests/integration/client-typescript/run-tests.js @@ -0,0 +1,63 @@ +#!/usr/bin/env node +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. 
+// +// This source code is licensed under the terms described in the LICENSE file in +// the root directory of this source tree. + +/** + * Test runner that finds and executes TypeScript tests based on suite/setup mapping. + * Called by integration-tests.sh via npm test. + */ + +const fs = require('fs'); +const path = require('path'); +const { execSync } = require('child_process'); + +const suite = process.env.LLAMA_STACK_TEST_SUITE; +const setup = process.env.LLAMA_STACK_TEST_SETUP || ''; + +if (!suite) { + console.error('Error: LLAMA_STACK_TEST_SUITE environment variable is required'); + process.exit(1); +} + +// Read suites.json to find matching test files +const suitesPath = path.join(__dirname, 'suites.json'); +if (!fs.existsSync(suitesPath)) { + console.log(`No TypeScript tests configured (${suitesPath} not found)`); + process.exit(0); +} + +const suites = JSON.parse(fs.readFileSync(suitesPath, 'utf-8')); + +// Find matching entry +let testFiles = []; +for (const entry of suites) { + if (entry.suite !== suite) { + continue; + } + const entrySetup = entry.setup || ''; + if (entrySetup && entrySetup !== setup) { + continue; + } + testFiles = entry.files || []; + break; +} + +if (testFiles.length === 0) { + console.log(`No TypeScript integration tests mapped for suite ${suite} (setup ${setup})`); + process.exit(0); +} + +console.log(`Running TypeScript tests for suite ${suite} (setup ${setup}): ${testFiles.join(', ')}`); + +// Run Jest with the mapped test files +try { + execSync(`npx jest --config jest.integration.config.js ${testFiles.join(' ')}`, { + stdio: 'inherit', + cwd: __dirname, + }); +} catch (error) { + process.exit(error.status || 1); +} diff --git a/tests/integration/client-typescript/setup.ts b/tests/integration/client-typescript/setup.ts new file mode 100644 index 000000000..75cabab74 --- /dev/null +++ b/tests/integration/client-typescript/setup.ts @@ -0,0 +1,162 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// All rights reserved. 
+// +// This source code is licensed under the terms described in the LICENSE file in +// the root directory of this source tree. + +/** + * Global setup for integration tests. + * This file mimics pytest's fixture system by providing shared test configuration. + */ + +import LlamaStackClient from 'llama-stack-client'; + +/** + * Load test configuration from the Python setup system. + * This reads setup definitions from tests/integration/suites.py via get_setup_env.py. + */ +function loadTestConfig() { + const baseURL = process.env['TEST_API_BASE_URL']; + const setupName = process.env['LLAMA_STACK_TEST_SETUP']; + const textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL']; + const embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL']; + + if (!baseURL) { + throw new Error( + 'TEST_API_BASE_URL is required for integration tests. ' + + 'Run tests using: ./scripts/integration-test.sh', + ); + } + + return { + baseURL, + textModel, + embeddingModel, + setupName, + }; +} + +// Read configuration from environment variables (set by scripts/integration-test.sh) +export const TEST_CONFIG = loadTestConfig(); + +// Validate required configuration +beforeAll(() => { + console.log('\n=== Integration Test Configuration ==='); + console.log(`Base URL: ${TEST_CONFIG.baseURL}`); + console.log(`Setup: ${TEST_CONFIG.setupName || 'NOT SET'}`); + console.log( + `Text Model: ${TEST_CONFIG.textModel || 'NOT SET - tests requiring text model will be skipped'}`, + ); + console.log( + `Embedding Model: ${ + TEST_CONFIG.embeddingModel || 'NOT SET - tests requiring embedding model will be skipped' + }`, + ); + console.log('=====================================\n'); +}); + +/** + * Create a client instance for integration tests. + * Mimics pytest's `llama_stack_client` fixture. + * + * @param testId - Test ID to send in X-LlamaStack-Provider-Data header for replay mode. 
+ * Format: "tests/integration/responses/test_basic_responses.py::test_name[params]" + */ +export function createTestClient(testId?: string): LlamaStackClient { + const headers: Record = {}; + + // In server mode with replay, send test ID for recording isolation + if (process.env['LLAMA_STACK_TEST_STACK_CONFIG_TYPE'] === 'server' && testId) { + headers['X-LlamaStack-Provider-Data'] = JSON.stringify({ + __test_id: testId, + }); + } + + return new LlamaStackClient({ + baseURL: TEST_CONFIG.baseURL, + timeout: 60000, // 60 seconds + defaultHeaders: headers, + }); +} + +/** + * Skip test if required model is not configured. + * Mimics pytest's `skip_if_no_model` autouse fixture. + */ +export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test { + const model = modelType === 'text' ? TEST_CONFIG.textModel : TEST_CONFIG.embeddingModel; + + if (!model) { + const envVar = modelType === 'text' ? 'LLAMA_STACK_TEST_TEXT_MODEL' : 'LLAMA_STACK_TEST_EMBEDDING_MODEL'; + const message = `Skipping: ${modelType} model not configured (set ${envVar})`; + return test.skip.bind(test) as typeof test; + } + + return test; +} + +/** + * Get the configured text model, throwing if not set. + * Use this in tests that absolutely require a text model. + */ +export function requireTextModel(): string { + if (!TEST_CONFIG.textModel) { + throw new Error( + 'LLAMA_STACK_TEST_TEXT_MODEL environment variable is required. ' + + 'Run tests using: ./scripts/integration-test.sh', + ); + } + return TEST_CONFIG.textModel; +} + +/** + * Get the configured embedding model, throwing if not set. + * Use this in tests that absolutely require an embedding model. + */ +export function requireEmbeddingModel(): string { + if (!TEST_CONFIG.embeddingModel) { + throw new Error( + 'LLAMA_STACK_TEST_EMBEDDING_MODEL environment variable is required. 
' + + 'Run tests using: ./scripts/integration-test.sh', + ); + } + return TEST_CONFIG.embeddingModel; +} + +/** + * Extracts aggregated text output from a ResponseObject. + * This concatenates all text content from the response's output array. + * + * Copied from llama-stack-client's response-helpers until it's available in published version. + */ +export function getResponseOutputText(response: any): string { + const pieces: string[] = []; + + for (const output of response.output ?? []) { + if (!output || output.type !== 'message') { + continue; + } + + const content = output.content; + if (typeof content === 'string') { + pieces.push(content); + continue; + } + + if (!Array.isArray(content)) { + continue; + } + + for (const item of content) { + if (typeof item === 'string') { + pieces.push(item); + continue; + } + if (item && item.type === 'output_text' && 'text' in item && typeof item.text === 'string') { + pieces.push(item.text); + } + } + } + + return pieces.join(''); +} diff --git a/tests/integration/client-typescript/suites.json b/tests/integration/client-typescript/suites.json new file mode 100644 index 000000000..5c5b83058 --- /dev/null +++ b/tests/integration/client-typescript/suites.json @@ -0,0 +1,12 @@ +[ + { + "suite": "responses", + "setup": "gpt", + "files": ["__tests__/responses.test.ts"] + }, + { + "suite": "base", + "setup": "ollama", + "files": ["__tests__/inference.test.ts"] + } +] diff --git a/tests/integration/client-typescript/tsconfig.json b/tests/integration/client-typescript/tsconfig.json new file mode 100644 index 000000000..19b6cdeb1 --- /dev/null +++ b/tests/integration/client-typescript/tsconfig.json @@ -0,0 +1,16 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "lib": ["ES2022"], + "moduleResolution": "bundler", + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "skipLibCheck": true, + "resolveJsonModule": true, + "types": ["jest", "node"] + }, + "include": ["**/*.ts"], + 
"exclude": ["node_modules"] +} From 1e4e02e6224b5a571dde6a181436c52f1ea8a34f Mon Sep 17 00:00:00 2001 From: Sam El-Borai Date: Wed, 19 Nov 2025 19:09:12 +0100 Subject: [PATCH 04/14] fix(ci): prefix stainless branches with fork author (#4187) # What does this PR do? I believe that should avoid CI issues seen in https://github.com/llamastack/llama-stack/pull/4173. Error we see in Stainless logs: ``` (cannot lock ref 'refs/heads/preview/base/fix/issue-3797-metadata-validation': 'refs/heads/preview/base/fix' exists; cannot create 'refs/heads/preview/base/fix/issue-3797-metadata-validation') ``` The issue is that if a branch `fix` exists, `fix/` cannot be created (that's how git refs work unfortunately...). The fix in this PR is to ensure PRs from forks are using the author as a prefix. In addition we will do changes to the Stainless API to return better error messages here, it should have been a 4xx with a meaningful error, not a 500. And we will likely need to delete the `fix` branch. ## Test Plan --- .github/workflows/stainless-builds.yml | 56 ++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/.github/workflows/stainless-builds.yml b/.github/workflows/stainless-builds.yml index 00c5e3df5..a18c70887 100644 --- a/.github/workflows/stainless-builds.yml +++ b/.github/workflows/stainless-builds.yml @@ -59,6 +59,30 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 2 + # Compute the Stainless branch name, prefixing with fork owner if PR is from a fork. 
+ # For fork PRs like "contributor:fix/issue-123", this creates "preview/contributor/fix/issue-123" + # For same-repo PRs, this creates "preview/fix/issue-123" + - name: Compute branch names + id: branch-names + run: | + HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}" + BASE_REPO="${{ github.repository }}" + BRANCH_NAME="${{ github.event.pull_request.head.ref }}" + + if [ "$HEAD_REPO" != "$BASE_REPO" ]; then + # Fork PR: prefix with fork owner for isolation + FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}" + PREVIEW_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}" + BASE_BRANCH="preview/base/${FORK_OWNER}/${BRANCH_NAME}" + else + # Same-repo PR + PREVIEW_BRANCH="preview/${BRANCH_NAME}" + BASE_BRANCH="preview/base/${BRANCH_NAME}" + fi + + echo "preview_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT + echo "base_branch=${BASE_BRANCH}" >> $GITHUB_OUTPUT + # This action builds preview SDKs from the OpenAPI spec changes and # posts/updates a comment on the PR with build results and links to the preview. - name: Run preview builds @@ -73,6 +97,8 @@ jobs: base_sha: ${{ github.event.pull_request.base.sha }} base_ref: ${{ github.event.pull_request.base.ref }} head_sha: ${{ github.event.pull_request.head.sha }} + branch: ${{ steps.branch-names.outputs.preview_branch }} + base_branch: ${{ steps.branch-names.outputs.base_branch }} merge: if: github.event.action == 'closed' && github.event.pull_request.merged == true @@ -90,12 +116,33 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 2 + # Compute the Stainless branch name, prefixing with fork owner if PR is from a fork. 
+ # For fork PRs like "contributor:fix/issue-123", this creates "preview/contributor/fix/issue-123" + # For same-repo PRs, this creates "preview/fix/issue-123" + - name: Compute branch names + id: branch-names + run: | + HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}" + BASE_REPO="${{ github.repository }}" + BRANCH_NAME="${{ github.event.pull_request.head.ref }}" + + if [ "$HEAD_REPO" != "$BASE_REPO" ]; then + # Fork PR: prefix with fork owner for isolation + FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}" + MERGE_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}" + else + # Same-repo PR + MERGE_BRANCH="preview/${BRANCH_NAME}" + fi + + echo "merge_branch=${MERGE_BRANCH}" >> $GITHUB_OUTPUT + # Note that this only merges in changes that happened on the last build on - # preview/${{ github.head_ref }}. It's possible that there are OAS/config - # changes that haven't been built, if the preview-sdk job didn't finish + # the computed preview branch. It's possible that there are OAS/config + # changes that haven't been built, if the preview job didn't finish # before this step starts. In theory we want to wait for all builds - # against preview/${{ github.head_ref }} to complete, but assuming that - # the preview-sdk job happens before the PR merge, it should be fine. + # against the preview branch to complete, but assuming that + # the preview job happens before the PR merge, it should be fine. 
- name: Run merge build uses: stainless-api/upload-openapi-spec-action/merge@32823b096b4319c53ee948d702d9052873af485f # 1.6.0 with: @@ -108,3 +155,4 @@ jobs: base_sha: ${{ github.event.pull_request.base.sha }} base_ref: ${{ github.event.pull_request.base.ref }} head_sha: ${{ github.event.pull_request.head.sha }} + merge_branch: ${{ steps.branch-names.outputs.merge_branch }} From f18870a22165b8bf3bac297df80a246c130cfb57 Mon Sep 17 00:00:00 2001 From: Roy Belio <34023431+r-bit-rry@users.noreply.github.com> Date: Wed, 19 Nov 2025 20:16:34 +0200 Subject: [PATCH 05/14] fix: Pydantic validation error with list-type metadata in vector search (#3797) (#4173) # Fix for Issue #3797 ## Problem Vector store search failed with Pydantic ValidationError when chunk metadata contained list-type values. **Error:** ``` ValidationError: 3 validation errors for VectorStoreSearchResponse attributes.tags.str: Input should be a valid string attributes.tags.float: Input should be a valid number attributes.tags.bool: Input should be a valid boolean ``` **Root Cause:** - `Chunk.metadata` accepts `dict[str, Any]` (any type allowed) - `VectorStoreSearchResponse.attributes` requires `dict[str, str | float | bool]` (primitives only) - Direct assignment at line 641 caused validation failure for non-primitive types ## Solution Added utility function to filter metadata to primitive types before creating search response. 
## Impact **Fixed:** - Vector search works with list metadata (e.g., `tags: ["transformers", "gpu"]`) - Lists become searchable as comma-separated strings - No ValidationError on search responses **Preserved:** - Full metadata still available in `VectorStoreContent.metadata` - No API schema changes - Backward compatible with existing primitive metadata **Affected:** All vector store providers using `OpenAIVectorStoreMixin`: FAISS, Chroma, Qdrant, Milvus, Weaviate, PGVector, SQLite-vec ## Testing tests/unit/providers/vector_io/test_vector_utils.py::test_sanitize_metadata_for_attributes --------- Co-authored-by: Ashwin Bharambe Co-authored-by: Francisco Arceo --- client-sdks/stainless/openapi.yml | 14 +++- docs/static/deprecated-llama-stack-spec.yaml | 14 +++- .../static/experimental-llama-stack-spec.yaml | 14 +++- docs/static/llama-stack-spec.yaml | 14 +++- docs/static/stainless-llama-stack-spec.yaml | 14 +++- src/llama_stack_api/vector_io.py | 69 ++++++++++++++++- .../providers/vector_io/test_vector_utils.py | 76 ++++++++++++++++++- 7 files changed, 207 insertions(+), 8 deletions(-) diff --git a/client-sdks/stainless/openapi.yml b/client-sdks/stainless/openapi.yml index a6ebc868c..9269b7e39 100644 --- a/client-sdks/stainless/openapi.yml +++ b/client-sdks/stainless/openapi.yml @@ -9862,9 +9862,21 @@ components: title: Object default: vector_store.file attributes: - additionalProperties: true + additionalProperties: + anyOf: + - type: string + maxLength: 512 + - type: number + - type: boolean + title: string | number | boolean + propertyNames: + type: string + maxLength: 64 type: object + maxProperties: 16 title: Attributes + description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. 
Values are strings with a maximum length of 512 characters, booleans, or numbers. + x-oaiTypeLabel: map chunking_strategy: oneOf: - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' diff --git a/docs/static/deprecated-llama-stack-spec.yaml b/docs/static/deprecated-llama-stack-spec.yaml index 207af8926..cf9bd14c4 100644 --- a/docs/static/deprecated-llama-stack-spec.yaml +++ b/docs/static/deprecated-llama-stack-spec.yaml @@ -6705,9 +6705,21 @@ components: title: Object default: vector_store.file attributes: - additionalProperties: true + additionalProperties: + anyOf: + - type: string + maxLength: 512 + - type: number + - type: boolean + title: string | number | boolean + propertyNames: + type: string + maxLength: 64 type: object + maxProperties: 16 title: Attributes + description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. + x-oaiTypeLabel: map chunking_strategy: oneOf: - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' diff --git a/docs/static/experimental-llama-stack-spec.yaml b/docs/static/experimental-llama-stack-spec.yaml index f81a93d33..18ce75562 100644 --- a/docs/static/experimental-llama-stack-spec.yaml +++ b/docs/static/experimental-llama-stack-spec.yaml @@ -6061,9 +6061,21 @@ components: title: Object default: vector_store.file attributes: - additionalProperties: true + additionalProperties: + anyOf: + - type: string + maxLength: 512 + - type: number + - type: boolean + title: string | number | boolean + propertyNames: + type: string + maxLength: 64 type: object + maxProperties: 16 title: Attributes + description: Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. + x-oaiTypeLabel: map chunking_strategy: oneOf: - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml index 816f3d0fb..9f7b2ed64 100644 --- a/docs/static/llama-stack-spec.yaml +++ b/docs/static/llama-stack-spec.yaml @@ -8883,9 +8883,21 @@ components: title: Object default: vector_store.file attributes: - additionalProperties: true + additionalProperties: + anyOf: + - type: string + maxLength: 512 + - type: number + - type: boolean + title: string | number | boolean + propertyNames: + type: string + maxLength: 64 type: object + maxProperties: 16 title: Attributes + description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. 
+ x-oaiTypeLabel: map chunking_strategy: oneOf: - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' diff --git a/docs/static/stainless-llama-stack-spec.yaml b/docs/static/stainless-llama-stack-spec.yaml index a6ebc868c..9269b7e39 100644 --- a/docs/static/stainless-llama-stack-spec.yaml +++ b/docs/static/stainless-llama-stack-spec.yaml @@ -9862,9 +9862,21 @@ components: title: Object default: vector_store.file attributes: - additionalProperties: true + additionalProperties: + anyOf: + - type: string + maxLength: 512 + - type: number + - type: boolean + title: string | number | boolean + propertyNames: + type: string + maxLength: 64 type: object + maxProperties: 16 title: Attributes + description: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. 
+ x-oaiTypeLabel: map chunking_strategy: oneOf: - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' diff --git a/src/llama_stack_api/vector_io.py b/src/llama_stack_api/vector_io.py index bfad644cc..135468d19 100644 --- a/src/llama_stack_api/vector_io.py +++ b/src/llama_stack_api/vector_io.py @@ -11,7 +11,7 @@ from typing import Annotated, Any, Literal, Protocol, runtime_checkable from fastapi import Body, Query -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_validator from llama_stack_api.common.tracing import telemetry_traceable from llama_stack_api.inference import InterleavedContent @@ -372,6 +372,65 @@ VectorStoreFileStatus = Literal["completed"] | Literal["in_progress"] | Literal[ register_schema(VectorStoreFileStatus, name="VectorStoreFileStatus") +# VectorStoreFileAttributes type with OpenAPI constraints +VectorStoreFileAttributes = Annotated[ + dict[str, Annotated[str, Field(max_length=512)] | float | bool], + Field( + max_length=16, + json_schema_extra={ + "propertyNames": {"type": "string", "maxLength": 64}, + "x-oaiTypeLabel": "map", + }, + description=( + "Set of 16 key-value pairs that can be attached to an object. This can be " + "useful for storing additional information about the object in a structured " + "format, and querying for objects via API or the dashboard. Keys are strings " + "with a maximum length of 64 characters. Values are strings with a maximum " + "length of 512 characters, booleans, or numbers." + ), + ), +] + + +def _sanitize_vector_store_attributes(metadata: dict[str, Any] | None) -> dict[str, str | float | bool]: + """ + Sanitize metadata to VectorStoreFileAttributes spec (max 16 properties, primitives only). 
+ + Converts dict[str, Any] to dict[str, str | float | bool]: + - Preserves: str (truncated to 512 chars), bool, int/float (as float) + - Converts: list -> comma-separated string + - Filters: dict, None, other types + - Enforces: max 16 properties, max 64 char keys, max 512 char string values + """ + if not metadata: + return {} + + sanitized: dict[str, str | float | bool] = {} + for key, value in metadata.items(): + # Enforce max 16 properties + if len(sanitized) >= 16: + break + + # Enforce max 64 char keys + if len(key) > 64: + continue + + # Convert to supported primitive types + if isinstance(value, bool): + sanitized[key] = value + elif isinstance(value, int | float): + sanitized[key] = float(value) + elif isinstance(value, str): + # Enforce max 512 char string values + sanitized[key] = value[:512] if len(value) > 512 else value + elif isinstance(value, list): + # Convert lists to comma-separated strings (max 512 chars) + list_str = ", ".join(str(item) for item in value) + sanitized[key] = list_str[:512] if len(list_str) > 512 else list_str + + return sanitized + + @json_schema_type class VectorStoreFileObject(BaseModel): """OpenAI Vector Store File object. 
@@ -389,7 +448,7 @@ class VectorStoreFileObject(BaseModel): id: str object: str = "vector_store.file" - attributes: dict[str, Any] = Field(default_factory=dict) + attributes: VectorStoreFileAttributes = Field(default_factory=dict) chunking_strategy: VectorStoreChunkingStrategy created_at: int last_error: VectorStoreFileLastError | None = None @@ -397,6 +456,12 @@ class VectorStoreFileObject(BaseModel): usage_bytes: int = 0 vector_store_id: str + @field_validator("attributes", mode="before") + @classmethod + def _validate_attributes(cls, v: dict[str, Any] | None) -> dict[str, str | float | bool]: + """Sanitize attributes to match VectorStoreFileAttributes OpenAPI spec.""" + return _sanitize_vector_store_attributes(v) + @json_schema_type class VectorStoreListFilesResponse(BaseModel): diff --git a/tests/unit/providers/vector_io/test_vector_utils.py b/tests/unit/providers/vector_io/test_vector_utils.py index 7f6b4af79..3e6b2971f 100644 --- a/tests/unit/providers/vector_io/test_vector_utils.py +++ b/tests/unit/providers/vector_io/test_vector_utils.py @@ -5,7 +5,7 @@ # the root directory of this source tree. from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id -from llama_stack_api import Chunk, ChunkMetadata +from llama_stack_api import Chunk, ChunkMetadata, VectorStoreFileObject # This test is a unit test for the chunk_utils.py helpers. This should only contain # tests which are specific to this file. 
More general (API-level) tests should be placed in @@ -78,3 +78,77 @@ def test_chunk_serialization(): serialized_chunk = chunk.model_dump() assert serialized_chunk["chunk_id"] == "test-chunk-id" assert "chunk_id" in serialized_chunk + + +def test_vector_store_file_object_attributes_validation(): + """Test VectorStoreFileObject validates and sanitizes attributes at input boundary.""" + # Test with metadata containing lists, nested dicts, and primitives + from llama_stack_api.vector_io import VectorStoreChunkingStrategyAuto + + file_obj = VectorStoreFileObject( + id="file-123", + attributes={ + "tags": ["transformers", "h100-compatible", "region:us"], # List -> string + "model_name": "granite-3.3-8b", # String preserved + "score": 0.95, # Float preserved + "active": True, # Bool preserved + "count": 42, # Int -> float + "nested": {"key": "value"}, # Dict filtered out + }, + chunking_strategy=VectorStoreChunkingStrategyAuto(), + created_at=1234567890, + status="completed", + vector_store_id="vs-123", + ) + + # Lists converted to comma-separated strings + assert file_obj.attributes["tags"] == "transformers, h100-compatible, region:us" + # Primitives preserved + assert file_obj.attributes["model_name"] == "granite-3.3-8b" + assert file_obj.attributes["score"] == 0.95 + assert file_obj.attributes["active"] is True + assert file_obj.attributes["count"] == 42.0 # int -> float + # Complex types filtered out + assert "nested" not in file_obj.attributes + + +def test_vector_store_file_object_attributes_constraints(): + """Test VectorStoreFileObject enforces OpenAPI constraints on attributes.""" + from llama_stack_api.vector_io import VectorStoreChunkingStrategyAuto + + # Test max 16 properties + many_attrs = {f"key{i}": f"value{i}" for i in range(20)} + file_obj = VectorStoreFileObject( + id="file-123", + attributes=many_attrs, + chunking_strategy=VectorStoreChunkingStrategyAuto(), + created_at=1234567890, + status="completed", + vector_store_id="vs-123", + ) + assert 
len(file_obj.attributes) == 16 # Max 16 properties + + # Test max 64 char keys are filtered + long_key_attrs = {"a" * 65: "value", "valid_key": "value"} + file_obj = VectorStoreFileObject( + id="file-124", + attributes=long_key_attrs, + chunking_strategy=VectorStoreChunkingStrategyAuto(), + created_at=1234567890, + status="completed", + vector_store_id="vs-123", + ) + assert "a" * 65 not in file_obj.attributes + assert "valid_key" in file_obj.attributes + + # Test max 512 char string values are truncated + long_value_attrs = {"key": "x" * 600} + file_obj = VectorStoreFileObject( + id="file-125", + attributes=long_value_attrs, + chunking_strategy=VectorStoreChunkingStrategyAuto(), + created_at=1234567890, + status="completed", + vector_store_id="vs-123", + ) + assert len(file_obj.attributes["key"]) == 512 From 72ea95e2e006645cb30bdf736859b23ae499749b Mon Sep 17 00:00:00 2001 From: Shabana Baig <43451943+s-akhtar-baig@users.noreply.github.com> Date: Wed, 19 Nov 2025 13:27:56 -0500 Subject: [PATCH 06/14] fix: Fix max_tool_calls for openai provider and add integration tests for the max_tool_calls feat (#4190) # Problem OpenAI gpt-4 returned an error when built-in and mcp calls were skipped due to max_tool_calls parameter. Following is from the server log: ``` RuntimeError: OpenAI response failed: Error code: 400 - {'error': {'message': "An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_Yi9V1QNpN73dJCAgP2Arcjej", 'type': 'invalid_request_error', 'param': 'messages', 'code': None}} ``` # What does this PR do? - Fixes error returned by openai/gpt when calls were skipped due to max_tool_calls. We now return a tool message that explicitly mentions that the call is skipped. 
- Adds integration tests as a follow-up to PR#[4062](https://github.com/llamastack/llama-stack/pull/4062) Part 2 for issue #[3563](https://github.com/llamastack/llama-stack/issues/3563) ## Test Plan - Added integration tests - Added new recordings --------- Co-authored-by: Ashwin Bharambe --- .../meta_reference/responses/streaming.py | 11 +- .../agents/test_openai_responses.py | 166 -- ...ddde3553f0db5d5a673146d8bb99c072e77cd.json | 773 ++++++++ ...c43af318468eb4ef84fd4008ebb40824b7e86.json | 593 ++++++ ...ddbcf60a1fedd85c501850b9f7e759443809f.json | 773 ++++++++ ...c2a685da5e743820a68de74640451f0072184.json | 1099 +++++++++++ ...6700e308ebbe9389bc5a1da8f4840fc9031ef.json | 1099 +++++++++++ ...04cf049a522bd106852b6d09e9baf41df88d3.json | 1634 ++++++++++++++++ ...de4d4f415f237e22b2b6983677a1e1319a0d3.json | 593 ++++++ ...d0b947d35c870ff825f06d8997a84dca1f5bf.json | 1661 +++++++++++++++++ .../responses/test_tool_responses.py | 152 ++ 11 files changed, 8386 insertions(+), 168 deletions(-) create mode 100644 tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json create mode 100644 tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json create mode 100644 tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json create mode 100644 tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json create mode 100644 tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json create mode 100644 tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json create mode 100644 tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json create mode 100644 
tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py index 0ef74f1f1..9e901d88b 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py @@ -66,6 +66,7 @@ from llama_stack_api import ( OpenAIResponseUsage, OpenAIResponseUsageInputTokensDetails, OpenAIResponseUsageOutputTokensDetails, + OpenAIToolMessageParam, Safety, WebSearchToolTypes, ) @@ -906,10 +907,16 @@ class StreamingResponseOrchestrator: """Coordinate execution of both function and non-function tool calls.""" # Execute non-function tool calls for tool_call in non_function_tool_calls: - # Check if total calls made to built-in and mcp tools exceed max_tool_calls + # if total calls made to built-in and mcp tools exceed max_tool_calls + # then create a tool response message indicating the call was skipped if self.max_tool_calls is not None and self.accumulated_builtin_tool_calls >= self.max_tool_calls: logger.info(f"Ignoring built-in and mcp tool call since reached the limit of {self.max_tool_calls=}.") - break + skipped_call_message = OpenAIToolMessageParam( + content=f"Tool call skipped: maximum tool calls limit ({self.max_tool_calls}) reached.", + tool_call_id=tool_call.id, + ) + next_turn_messages.append(skipped_call_message) + continue # Find the item_id for this tool call matching_item_id = None diff --git a/tests/integration/agents/test_openai_responses.py b/tests/integration/agents/test_openai_responses.py index 057cee774..d413d5201 100644 --- a/tests/integration/agents/test_openai_responses.py +++ b/tests/integration/agents/test_openai_responses.py @@ -516,169 +516,3 @@ def test_response_with_instructions(openai_client, client_with_models, text_mode # Verify 
instructions from previous response was not carried over to the next response assert response_with_instructions2.instructions == instructions2 - - -@pytest.mark.skip(reason="Tool calling is not reliable.") -def test_max_tool_calls_with_function_tools(openai_client, client_with_models, text_model_id): - """Test handling of max_tool_calls with function tools in responses.""" - if isinstance(client_with_models, LlamaStackAsLibraryClient): - pytest.skip("OpenAI responses are not supported when testing with library client yet.") - - client = openai_client - max_tool_calls = 1 - - tools = [ - { - "type": "function", - "name": "get_weather", - "description": "Get weather information for a specified location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city name (e.g., 'New York', 'London')", - }, - }, - }, - }, - { - "type": "function", - "name": "get_time", - "description": "Get current time for a specified location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city name (e.g., 'New York', 'London')", - }, - }, - }, - }, - ] - - # First create a response that triggers function tools - response = client.responses.create( - model=text_model_id, - input="Can you tell me the weather in Paris and the current time?", - tools=tools, - stream=False, - max_tool_calls=max_tool_calls, - ) - - # Verify we got two function calls and that the max_tool_calls do not affect function tools - assert len(response.output) == 2 - assert response.output[0].type == "function_call" - assert response.output[0].name == "get_weather" - assert response.output[0].status == "completed" - assert response.output[1].type == "function_call" - assert response.output[1].name == "get_time" - assert response.output[0].status == "completed" - - # Verify we have a valid max_tool_calls field - assert response.max_tool_calls == max_tool_calls - - -def 
test_max_tool_calls_invalid(openai_client, client_with_models, text_model_id): - """Test handling of invalid max_tool_calls in responses.""" - if isinstance(client_with_models, LlamaStackAsLibraryClient): - pytest.skip("OpenAI responses are not supported when testing with library client yet.") - - client = openai_client - - input = "Search for today's top technology news." - invalid_max_tool_calls = 0 - tools = [ - {"type": "web_search"}, - ] - - # Create a response with an invalid max_tool_calls value i.e. 0 - # Handle ValueError from LLS and BadRequestError from OpenAI client - with pytest.raises((ValueError, BadRequestError)) as excinfo: - client.responses.create( - model=text_model_id, - input=input, - tools=tools, - stream=False, - max_tool_calls=invalid_max_tool_calls, - ) - - error_message = str(excinfo.value) - assert f"Invalid max_tool_calls={invalid_max_tool_calls}; should be >= 1" in error_message, ( - f"Expected error message about invalid max_tool_calls, got: {error_message}" - ) - - -def test_max_tool_calls_with_builtin_tools(openai_client, client_with_models, text_model_id): - """Test handling of max_tool_calls with built-in tools in responses.""" - if isinstance(client_with_models, LlamaStackAsLibraryClient): - pytest.skip("OpenAI responses are not supported when testing with library client yet.") - - client = openai_client - - input = "Search for today's top technology and a positive news story. You MUST make exactly two separate web search calls." 
- max_tool_calls = [1, 5] - tools = [ - {"type": "web_search"}, - ] - - # First create a response that triggers web_search tools without max_tool_calls - response = client.responses.create( - model=text_model_id, - input=input, - tools=tools, - stream=False, - ) - - # Verify we got two web search calls followed by a message - assert len(response.output) == 3 - assert response.output[0].type == "web_search_call" - assert response.output[0].status == "completed" - assert response.output[1].type == "web_search_call" - assert response.output[1].status == "completed" - assert response.output[2].type == "message" - assert response.output[2].status == "completed" - assert response.output[2].role == "assistant" - - # Next create a response that triggers web_search tools with max_tool_calls set to 1 - response_2 = client.responses.create( - model=text_model_id, - input=input, - tools=tools, - stream=False, - max_tool_calls=max_tool_calls[0], - ) - - # Verify we got one web search tool call followed by a message - assert len(response_2.output) == 2 - assert response_2.output[0].type == "web_search_call" - assert response_2.output[0].status == "completed" - assert response_2.output[1].type == "message" - assert response_2.output[1].status == "completed" - assert response_2.output[1].role == "assistant" - - # Verify we have a valid max_tool_calls field - assert response_2.max_tool_calls == max_tool_calls[0] - - # Finally create a response that triggers web_search tools with max_tool_calls set to 5 - response_3 = client.responses.create( - model=text_model_id, - input=input, - tools=tools, - stream=False, - max_tool_calls=max_tool_calls[1], - ) - - # Verify we got two web search calls followed by a message - assert len(response_3.output) == 3 - assert response_3.output[0].type == "web_search_call" - assert response_3.output[0].status == "completed" - assert response_3.output[1].type == "web_search_call" - assert response_3.output[1].status == "completed" - assert 
response_3.output[2].type == "message" - assert response_3.output[2].status == "completed" - assert response_3.output[2].role == "assistant" - - # Verify we have a valid max_tool_calls field - assert response_3.max_tool_calls == max_tool_calls[1] diff --git a/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json b/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json new file mode 100644 index 000000000..4418331b0 --- /dev/null +++ b/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json @@ -0,0 +1,773 @@ +{ + "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_user_id", + "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ", + "parameters": { + "properties": { + "username": { + "title": "Username", + "type": "string" + } + }, + "required": [ + "username" + ], + "title": "get_user_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_user_permissions", + "description": "\n Get the permissions for a user ID. 
Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + } + }, + "required": [ + "user_id" + ], + "title": "get_user_permissionsArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "check_file_access", + "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "user_id", + "filename" + ], + "title": "check_file_accessArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_id", + "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ", + "parameters": { + "properties": { + "experiment_name": { + "title": "Experiment Name", + "type": "string" + } + }, + "required": [ + "experiment_name" + ], + "title": "get_experiment_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_results", + "description": "\n Get the results for an experiment ID. 
Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ", + "parameters": { + "properties": { + "experiment_id": { + "title": "Experiment Id", + "type": "string" + } + }, + "required": [ + "experiment_id" + ], + "title": "get_experiment_resultsArguments", + "type": "object" + } + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "1V9w3bXnppL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg", + "function": { + "arguments": "", + "name": "get_experiment_id" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "YEsj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + 
"id": null, + "function": { + "arguments": "{\"ex", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "n" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "perim", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Q" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "ent_na", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "me\":", + "name": null + 
}, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "U" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": " \"boi", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "ling_p", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "oint", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ha" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "d5D" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": "call_HELkyZOm2fzLx2CeTH3bEcS2", + "function": { + "arguments": "", + "name": "get_user_id" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "0LbsjDcKz6" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "{\"us", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + 
], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "c" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "ernam", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "e\": \"c", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "7C0WFn181I3y3l" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "harl", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "wf" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "ie\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "r" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "tool_calls", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "FAci" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-1997dc007d20", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": { + "completion_tokens": 51, + "prompt_tokens": 393, + "total_tokens": 444, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "6xgpRRdKjviPT" + } + } + 
], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json b/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json new file mode 100644 index 000000000..3bec72d95 --- /dev/null +++ b/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json @@ -0,0 +1,593 @@ +{ + "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_function_tools[openai_client-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Can you tell me the weather in Paris and the current time?" + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "type": "function", + "name": "get_weather", + "description": "Get weather information for a specified location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name (e.g., 'New York', 'London')" + } + } + }, + "strict": null + } + }, + { + "type": "function", + "function": { + "type": "function", + "name": "get_time", + "description": "Get current time for a specified location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name (e.g., 'New York', 'London')" + } + } + }, + "strict": null + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + 
"role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "QmTXstGvpa8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": "call_HJMoLtHXfCzhlMQOfqIKt0n3", + "function": { + "arguments": "", + "name": "get_weather" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "iFjmkK23KL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "{\"lo", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "7" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "catio", + "name": null 
+ }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "L" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "n\": \"P", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "THa6gWbrWhVmZ6" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "aris", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "eL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": 
null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "jng" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": "call_vGKvTKZM7aALMaUw3Jas7lRg", + "function": { + "arguments": "", + "name": "get_time" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "LSailgMcgSl54" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "{\"lo", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "z" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "catio", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "4" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "n\": \"P", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "0engr6vRvqXTEP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "aris", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "Pe" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": 
"gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "LU9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "tool_calls", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "kD7d" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-463ab0e2f291", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": { + "completion_tokens": 44, + "prompt_tokens": 110, + "total_tokens": 154, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "R4ICoxqTqj7ZY" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json b/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json new file mode 100644 index 000000000..ee32a4396 --- /dev/null +++ b/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json @@ -0,0 +1,773 @@ +{ + "test_id": 
"tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_user_id", + "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ", + "parameters": { + "properties": { + "username": { + "title": "Username", + "type": "string" + } + }, + "required": [ + "username" + ], + "title": "get_user_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_user_permissions", + "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + } + }, + "required": [ + "user_id" + ], + "title": "get_user_permissionsArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "check_file_access", + "description": "\n Check if a user can access a specific file. 
Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "user_id", + "filename" + ], + "title": "check_file_accessArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_id", + "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ", + "parameters": { + "properties": { + "experiment_name": { + "title": "Experiment Name", + "type": "string" + } + }, + "required": [ + "experiment_name" + ], + "title": "get_experiment_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_results", + "description": "\n Get the results for an experiment ID. 
Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ", + "parameters": { + "properties": { + "experiment_id": { + "title": "Experiment Id", + "type": "string" + } + }, + "required": [ + "experiment_id" + ], + "title": "get_experiment_resultsArguments", + "type": "object" + } + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "N5OTLR9CfmU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": "call_z8P1RQv54BLxyMlRdMFkcCGd", + "function": { + "arguments": "", + "name": "get_experiment_id" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "3EKK" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + 
"id": null, + "function": { + "arguments": "{\"ex", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "R" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "perim", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Q" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "ent_na", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "me\":", + "name": null + 
}, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "6" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": " \"boi", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "ling_p", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "oint", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "pw" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Gfk" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": "call_I5tcLgyMADoVwLKDj9HkTCs5", + "function": { + "arguments": "", + "name": "get_user_id" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Yp7IueDs5V" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "{\"us", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + 
], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "ernam", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "X" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "e\": \"c", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "2oif8BwVnTCnAF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "harl", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "hv" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "ie\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "C" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "tool_calls", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ctjO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b218af7fa066", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": { + "completion_tokens": 51, + "prompt_tokens": 393, + "total_tokens": 444, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "fclbZeBSSKN4C" + } + } + 
], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json b/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json new file mode 100644 index 000000000..2f5d2364f --- /dev/null +++ b/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json @@ -0,0 +1,1099 @@ +{ + "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg", + "type": "function", + "function": { + "name": "get_experiment_id", + "arguments": "{\"experiment_name\": \"boiling_point\"}" + } + }, + { + "index": 1, + "id": "call_HELkyZOm2fzLx2CeTH3bEcS2", + "type": "function", + "function": { + "name": "get_user_id", + "arguments": "{\"username\": \"charlie\"}" + } + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg", + "content": [ + { + "type": "text", + "text": "exp_004" + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_HELkyZOm2fzLx2CeTH3bEcS2", + "content": [ + { + "type": "text", + "text": "user_11111" + } + ] + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_user_id", + "description": "\n Get the user ID for a given username. 
This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ", + "parameters": { + "properties": { + "username": { + "title": "Username", + "type": "string" + } + }, + "required": [ + "username" + ], + "title": "get_user_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_user_permissions", + "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + } + }, + "required": [ + "user_id" + ], + "title": "get_user_permissionsArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "check_file_access", + "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "user_id", + "filename" + ], + "title": "check_file_accessArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_id", + "description": "\n Get the experiment ID for a given experiment name. 
This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ", + "parameters": { + "properties": { + "experiment_name": { + "title": "Experiment Name", + "type": "string" + } + }, + "required": [ + "experiment_name" + ], + "title": "get_experiment_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_results", + "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ", + "parameters": { + "properties": { + "experiment_id": { + "title": "Experiment Id", + "type": "string" + } + }, + "required": [ + "experiment_id" + ], + "title": "get_experiment_resultsArguments", + "type": "object" + } + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "YYi7jfwMArDwjF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "The", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": 
"fp_c98e05ca17", + "usage": null, + "obfuscation": "02OX5OI6tENcr" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " experiment", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "4WNc0" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "tKtJ1sl5pfaDr" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Hvj1aWM1rpv8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + 
"index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "9E9CvQfqolGi9S" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "bo", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "j4WB9GjVD9jcfN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "iling", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "TTDWSqM29LF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "_point", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "AjjxQybBbe" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": 
"rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "1gVblRiURtILOET" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "0R3NJvfpXy2dP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " `", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "A7ulc3isZRh1Wy" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "exp", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": 
"fp_c98e05ca17", + "usage": null, + "obfuscation": "FPq6iOQwJS1aQ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "_", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Kc20HZgwXltY5rS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "004", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "2FCOJr6gSDviM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "`,", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "zcC44JB9JLv8DJ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + 
"index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "YkHz4dmGI8Ip" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "WU1FWVwHa8kT" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " user", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "F89Whppjswq" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "WSOnxHfHCWTqS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": 
"rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "xdc4FO9TTNKE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "815WDeN0y91Hke" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "char", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "xp6WP0YmWjNZ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "lie", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": 
"fp_c98e05ca17", + "usage": null, + "obfuscation": "apUUpE3jkpxjm" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "TfCA46aEfur7ddv" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "4q5btS7EmyGo4" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": " `", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "a5UVTkIvEXtjbH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "user", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + 
"index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "UGU1lPYHNno0" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "_", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "4axBUdqWraTmuNf" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "111", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ZtMOpwGI78JEH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "11", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "LqPjHcx2BmtLO1" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": 
"rec-b2b590332535", + "choices": [ + { + "delta": { + "content": "`.", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "l5q2xqEWQx4dA4" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "sM6qZWT3Vp" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b2b590332535", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": { + "completion_tokens": 32, + "prompt_tokens": 465, + "total_tokens": 497, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "Nr5ToBPpxyZu4" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json b/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json new file mode 100644 index 000000000..3c9321759 --- 
/dev/null +++ b/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json @@ -0,0 +1,1099 @@ +{ + "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "call_z8P1RQv54BLxyMlRdMFkcCGd", + "type": "function", + "function": { + "name": "get_experiment_id", + "arguments": "{\"experiment_name\": \"boiling_point\"}" + } + }, + { + "index": 1, + "id": "call_I5tcLgyMADoVwLKDj9HkTCs5", + "type": "function", + "function": { + "name": "get_user_id", + "arguments": "{\"username\": \"charlie\"}" + } + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_z8P1RQv54BLxyMlRdMFkcCGd", + "content": [ + { + "type": "text", + "text": "exp_004" + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_I5tcLgyMADoVwLKDj9HkTCs5", + "content": [ + { + "type": "text", + "text": "user_11111" + } + ] + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_user_id", + "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ", + "parameters": { + "properties": { + "username": { + "title": "Username", + "type": "string" + } + }, + "required": [ + "username" + ], + "title": "get_user_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_user_permissions", + "description": "\n Get the permissions for a user ID. 
Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + } + }, + "required": [ + "user_id" + ], + "title": "get_user_permissionsArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "check_file_access", + "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "user_id", + "filename" + ], + "title": "check_file_accessArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_id", + "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ", + "parameters": { + "properties": { + "experiment_name": { + "title": "Experiment Name", + "type": "string" + } + }, + "required": [ + "experiment_name" + ], + "title": "get_experiment_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_results", + "description": "\n Get the results for an experiment ID. 
Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ", + "parameters": { + "properties": { + "experiment_id": { + "title": "Experiment Id", + "type": "string" + } + }, + "required": [ + "experiment_id" + ], + "title": "get_experiment_resultsArguments", + "type": "object" + } + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "wwHFAiwvH4WszR" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "The", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "9715Kiw8g6FeU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " experiment", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "f3RUP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "uTou0sZw0Trqr" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "O3FUhiRX4t3O" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "8Row2VeWyXlavX" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "bo", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "R6KU5Aed2Y4hdt" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "iling", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "aXOqmJlIAIp" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "_point", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "AEyQ67P1E9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "pxs1ElabWHWYTsE" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "f4fvZlQAsoFLb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " `", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "XIUUCRzVlWEjdW" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "exp", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "x2dM9CVkT0ICQ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "_", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Ls8dfHOXPeHjdGE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "004", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "RF1hpcOB964EM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "`,", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "QnLWon1Lh1bPrb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "0OHZT5bnbdwa" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " the", + "function_call": 
null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "jtbU7bWjfj72" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " user", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "nCopvKj1JIE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "2ZDuFZoCixweF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "u3QmR0zYiExg" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "z6tGgyH3Gw667d" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "char", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "HalCDTgB5QRV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "lie", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "5UJBpMTsZMjVF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "p8zU7xEpcUR63Lh" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "t0fKxlCyUxaFU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": " `", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "lRSEHqi9mVmVZJ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "user", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "8C6DeNABBjpJ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "_", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "L4qXmW7bonqcf97" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "111", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "zje3cRhC3fzKb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "11", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "NgeVi1nYcUbkmN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": "`.", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "d83dlilKTeA1RE" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "HnPRpNWz4n" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-b376e47c1857", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": { + "completion_tokens": 32, + "prompt_tokens": 465, + "total_tokens": 497, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "sfrloH58kmZpA" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json b/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json new file mode 100644 index 000000000..821bd20c4 --- /dev/null +++ b/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json @@ -0,0 +1,1634 @@ +{ + "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": 
"user", + "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg", + "type": "function", + "function": { + "name": "get_experiment_id", + "arguments": "{\"experiment_name\": \"boiling_point\"}" + } + }, + { + "index": 1, + "id": "call_HELkyZOm2fzLx2CeTH3bEcS2", + "type": "function", + "function": { + "name": "get_user_id", + "arguments": "{\"username\": \"charlie\"}" + } + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg", + "content": [ + { + "type": "text", + "text": "exp_004" + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_HELkyZOm2fzLx2CeTH3bEcS2", + "content": "Tool call skipped: maximum tool calls limit (1) reached." + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_user_id", + "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ", + "parameters": { + "properties": { + "username": { + "title": "Username", + "type": "string" + } + }, + "required": [ + "username" + ], + "title": "get_user_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_user_permissions", + "description": "\n Get the permissions for a user ID. 
Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + } + }, + "required": [ + "user_id" + ], + "title": "get_user_permissionsArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "check_file_access", + "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "user_id", + "filename" + ], + "title": "check_file_accessArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_id", + "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ", + "parameters": { + "properties": { + "experiment_name": { + "title": "Experiment Name", + "type": "string" + } + }, + "required": [ + "experiment_name" + ], + "title": "get_experiment_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_results", + "description": "\n Get the results for an experiment ID. 
Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ", + "parameters": { + "properties": { + "experiment_id": { + "title": "Experiment Id", + "type": "string" + } + }, + "required": [ + "experiment_id" + ], + "title": "get_experiment_resultsArguments", + "type": "object" + } + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "9zm2knPUrQf9Ti" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "The", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "dBZWt7n0cY28K" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " experiment", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "gBkUe" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "DK27AidkjJEUs" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "BvRS3fe55saU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Q30TpKRJ8sqbaj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "bo", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "uZIcYxencsPVq7" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "iling", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "OTlywqpO2gu" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "_point", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "1D39HJt78o" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "z9q3XLiA1zUj69i" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "YilL3DwdzhGNE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " `", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "yLvB3LVIF9yqTB" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "exp", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "aQ2ZgA6wBrzgb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "_", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "0jzpzruxw3CNxO3" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "004", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Wl5Eu8yWUoj2V" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "`.", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "F3a7FpN1N5MOoL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " However", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "oC3Sc1Oj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "dR3KxirqoL6RMvN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "HDIUF9MxNvDNC8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " wasn't", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "jvYMbj7Jb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " able", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "wA25F90roLY" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "1kP6AeTeGmGNU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " get", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "8zixGSMc9fiH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "UCSCTgIKkLiT" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " user", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "1hHm53qitSi" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "N3NBeCvE43ZRW" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ul7bMYRpL04n" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ABgwNSe6WHqE9N" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "char", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "6q5tAeJOMEC8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "lie", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "gxcccAWJYWckn" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "qpqi3k54AaZDnNH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " due", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "OB5oYuchm2uE" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "MKHpNGKsdWpLO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "zYt4J00NPy69fJ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " tool", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Z0kM0bozww8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " call", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "qbQA28Mr3PO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " limit", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ZzevZnpsYj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "QBno7Vj0QhMrSjO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " Please", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "hEj0RemlE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " let", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "xN8xRqzcxXCR" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " me", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "0LxJ9leKvCunj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " know", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "KoHcgiBEVc6" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " if", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "eT2hCjpvISlxh" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "9LJdcoWEzgMP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " would", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "bxChZ0IYYP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " like", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "oU5UBQRKEpI" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " me", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "HQHzzykuhNV7v" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "YJ86yXpqctfF5" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " attempt", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ToTM0n5O" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " that", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "SateSvqBggb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": " again", + "function_call": 
null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "APRnnp4Qce" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Xe9yNJcVnFP4PZl" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ZH7NR5wSoI" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-c1b953d78e04", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": { + "completion_tokens": 52, + "prompt_tokens": 474, + "total_tokens": 526, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + 
"audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "2P0uXrABC0X8d" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json b/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json new file mode 100644 index 000000000..450d84176 --- /dev/null +++ b/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json @@ -0,0 +1,593 @@ +{ + "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_function_tools[client_with_models-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Can you tell me the weather in Paris and the current time?" + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "type": "function", + "name": "get_weather", + "description": "Get weather information for a specified location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name (e.g., 'New York', 'London')" + } + } + }, + "strict": null + } + }, + { + "type": "function", + "function": { + "type": "function", + "name": "get_time", + "description": "Get current time for a specified location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name (e.g., 'New York', 'London')" + } + } + }, + "strict": null + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + 
"choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "iUduPiCYBRb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": "call_Wv3G8aEQOJLNXGRaK3hAWzq3", + "function": { + "arguments": "", + "name": "get_weather" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "cqZKgzm65y" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "{\"lo", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + 
"tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "catio", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "L" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "n\": \"P", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "zbBLzavvnEdLz0" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + "function": { + "arguments": "aris", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "Gj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 0, + "id": null, + 
"function": { + "arguments": "\"}", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "LQo" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": "call_8xkOmOgJpV77n5W2dSx6ytW6", + "function": { + "arguments": "", + "name": "get_time" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "eltoncGlxI8Go" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "{\"lo", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "S" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + 
"arguments": "catio", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "N" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "n\": \"P", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "2bTn1MaAXYFoVK" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "aris", + "name": null + }, + "type": null + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "VF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": [ + { + "index": 1, + "id": null, + "function": { + "arguments": "\"}", + "name": null + }, + "type": null 
+ } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "BHi" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "tool_calls", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": null, + "obfuscation": "WaYG" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-d073f434d28c", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_b1442291a8", + "usage": { + "completion_tokens": 44, + "prompt_tokens": 110, + "total_tokens": 154, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "aevj6ZWLqfCK6" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json b/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json new file mode 100644 index 000000000..089242af3 --- /dev/null +++ b/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json @@ -0,0 +1,1661 @@ +{ + "test_id": 
"tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "index": 0, + "id": "call_z8P1RQv54BLxyMlRdMFkcCGd", + "type": "function", + "function": { + "name": "get_experiment_id", + "arguments": "{\"experiment_name\": \"boiling_point\"}" + } + }, + { + "index": 1, + "id": "call_I5tcLgyMADoVwLKDj9HkTCs5", + "type": "function", + "function": { + "name": "get_user_id", + "arguments": "{\"username\": \"charlie\"}" + } + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_z8P1RQv54BLxyMlRdMFkcCGd", + "content": [ + { + "type": "text", + "text": "exp_004" + } + ] + }, + { + "role": "tool", + "tool_call_id": "call_I5tcLgyMADoVwLKDj9HkTCs5", + "content": "Tool call skipped: maximum tool calls limit (1) reached." + } + ], + "stream": true, + "stream_options": { + "include_usage": true + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_user_id", + "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ", + "parameters": { + "properties": { + "username": { + "title": "Username", + "type": "string" + } + }, + "required": [ + "username" + ], + "title": "get_user_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_user_permissions", + "description": "\n Get the permissions for a user ID. 
Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + } + }, + "required": [ + "user_id" + ], + "title": "get_user_permissionsArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "check_file_access", + "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ", + "parameters": { + "properties": { + "user_id": { + "title": "User Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "required": [ + "user_id", + "filename" + ], + "title": "check_file_accessArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_id", + "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ", + "parameters": { + "properties": { + "experiment_name": { + "title": "Experiment Name", + "type": "string" + } + }, + "required": [ + "experiment_name" + ], + "title": "get_experiment_idArguments", + "type": "object" + } + } + }, + { + "type": "function", + "function": { + "name": "get_experiment_results", + "description": "\n Get the results for an experiment ID. 
Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ", + "parameters": { + "properties": { + "experiment_id": { + "title": "Experiment Id", + "type": "string" + } + }, + "required": [ + "experiment_id" + ], + "title": "get_experiment_resultsArguments", + "type": "object" + } + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "uoj10MYhhjCsjQ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "The", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "RbrwfJ20BVqRi" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " experiment", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "88xHU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "lXhzWF230RZCL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "McIrBR2XVfyS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "7SiItrYff13YKr" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "bo", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "pf232bD4VeXdXc" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "iling", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "z0kyzhP7ioh" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "_point", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "3TUkmyiT28" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "kFAkj6BHwM6YKZQ" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "fiRWSM9LNpP4J" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " `", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "VRPBkgW9PrA6C7" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "exp", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "YqSi9vVuexh3e" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "_", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "y64suQvx1Nfp8Pj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "004", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "kouF1KXaF3fSv" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "`.", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Ju1xHmwme71tPA" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " However", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "TZuAhRJ8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ikVKxLAdOhUPHHa" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "pntThOzs2GzlYs" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " couldn't", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "v4ihoTx" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " retrieve", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "476NjPo" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "AFDAUQw3ezkM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " user", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ztweLiyDuwu" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " ID", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "q575s9DLRlXDL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "oEoKwHu8H1FD" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " '", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "KOgPjHTbZYg83A" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "char", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "PmTsVhsBBtRV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "lie", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "hkXsP7qhxNrQ0" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "'", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "C9RtrovVHvrH33B" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " at", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "fhJHhlmbEWrnY" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " this", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "pvYlADlLGnc" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " time", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "N787ynNkyIU" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " due", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "lkX5gCjexTSI" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ecopEBh7Ckmai" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "Nf1X9c8Z4TduoA" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " tool", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "MtnVKdm0UnR" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " call", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "ExJ8aBPckoF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " limitation", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "jE7bT" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "AaaLnYdPLucETYH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " Please", + "function_call": null, 
+ "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "cPsBAfFXF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " let", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "nGUo5AX3lQpP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " me", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "shpHT1JYFdHrS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " know", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "RG8m7peAEPl" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " if", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "i4q8OeCvU08qi" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " there's", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "lXBbPXWn" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " anything", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "EyZRgWl" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " else", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "h87NDUy4I75" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "1CJqPAnvuBVEXV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "9Ava6GiwMlu5" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " assist", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "fl9TQoNlV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + 
"refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "4PwMuL1TPPvZ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": " with", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "XeIvTn2s7ap" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "U93F4p2ENgwWFKN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": null, + "obfuscation": "3P0Kp8n8xH" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-e3e2e64c57bb", + "choices": [], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_c98e05ca17", + "usage": { + "completion_tokens": 53, + "prompt_tokens": 474, + "total_tokens": 527, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "audio_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 0 + }, + "prompt_tokens_details": { + "audio_tokens": 0, + "cached_tokens": 0 + } + }, + "obfuscation": "zjt0xUw7Sz8p9" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/test_tool_responses.py b/tests/integration/responses/test_tool_responses.py index 742d45f8b..49bcd050b 100644 --- a/tests/integration/responses/test_tool_responses.py +++ b/tests/integration/responses/test_tool_responses.py @@ -600,3 +600,155 @@ def test_response_streaming_multi_turn_tool_execution(responses_client, text_mod assert expected_output.lower() in final_response.output_text.lower(), ( f"Expected '{expected_output}' to appear in response: {final_response.output_text}" ) + + +def test_max_tool_calls_with_function_tools(responses_client, text_model_id): + """Test handling of max_tool_calls with function tools in responses.""" + + max_tool_calls = 1 + tools = [ + { + "type": "function", + "name": "get_weather", + "description": "Get weather information for a specified location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name (e.g., 'New York', 'London')", + }, + }, + }, + }, + { + "type": "function", + "name": "get_time", + "description": "Get current time for a specified location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city name (e.g., 'New York', 'London')", + }, + }, + 
}, + }, + ] + + response = responses_client.responses.create( + model=text_model_id, + input="Can you tell me the weather in Paris and the current time?", + tools=tools, + stream=False, + max_tool_calls=max_tool_calls, + ) + + # Verify we got two function calls and that the max_tool_calls does not affect function tools + assert len(response.output) == 2 + assert response.output[0].type == "function_call" + assert response.output[0].name == "get_weather" + assert response.output[0].status == "completed" + assert response.output[1].type == "function_call" + assert response.output[1].name == "get_time" + assert response.output[1].status == "completed" + + # Verify we have a valid max_tool_calls field + assert response.max_tool_calls == max_tool_calls + + +def test_max_tool_calls_invalid(responses_client, text_model_id): + """Test handling of invalid max_tool_calls in responses.""" + + input = "Search for today's top technology news." + invalid_max_tool_calls = 0 + tools = [ + {"type": "web_search"}, + ] + + # Create a response with an invalid max_tool_calls value i.e. 
0 + # Handle ValueError from LLS and BadRequestError from OpenAI client + with pytest.raises((ValueError, llama_stack_client.BadRequestError, openai.BadRequestError)) as excinfo: + responses_client.responses.create( + model=text_model_id, + input=input, + tools=tools, + stream=False, + max_tool_calls=invalid_max_tool_calls, + ) + + error_message = str(excinfo.value) + assert f"Invalid max_tool_calls={invalid_max_tool_calls}; should be >= 1" in error_message, ( + f"Expected error message about invalid max_tool_calls, got: {error_message}" + ) + + +def test_max_tool_calls_with_mcp_tools(responses_client, text_model_id): + """Test handling of max_tool_calls with mcp tools in responses.""" + + with make_mcp_server(tools=dependency_tools()) as mcp_server_info: + input = "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'" + max_tool_calls = [1, 5] + tools = [ + {"type": "mcp", "server_label": "localmcp", "server_url": mcp_server_info["server_url"]}, + ] + + # First create a response that triggers mcp tools without max_tool_calls + response = responses_client.responses.create( + model=text_model_id, + input=input, + tools=tools, + stream=False, + ) + + # Verify we got two mcp tool calls followed by a message + assert len(response.output) == 4 + mcp_list_tools = [output for output in response.output if output.type == "mcp_list_tools"] + mcp_calls = [output for output in response.output if output.type == "mcp_call"] + message_outputs = [output for output in response.output if output.type == "message"] + assert len(mcp_list_tools) == 1 + assert len(mcp_calls) == 2, f"Expected two mcp calls, got {len(mcp_calls)}" + assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}" + + # Next create a response that triggers mcp tools with max_tool_calls set to 1 + response_2 = responses_client.responses.create( + model=text_model_id, + input=input, + tools=tools, + stream=False, + max_tool_calls=max_tool_calls[0], + ) + + # 
Verify we got one mcp tool call followed by a message + assert len(response_2.output) == 3 + mcp_list_tools = [output for output in response_2.output if output.type == "mcp_list_tools"] + mcp_calls = [output for output in response_2.output if output.type == "mcp_call"] + message_outputs = [output for output in response_2.output if output.type == "message"] + assert len(mcp_list_tools) == 1 + assert len(mcp_calls) == 1, f"Expected one mcp call, got {len(mcp_calls)}" + assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}" + + # Verify we have a valid max_tool_calls field + assert response_2.max_tool_calls == max_tool_calls[0] + + # Finally create a response that triggers mcp tools with max_tool_calls set to 5 + response_3 = responses_client.responses.create( + model=text_model_id, + input=input, + tools=tools, + stream=False, + max_tool_calls=max_tool_calls[1], + ) + + # Verify we got two mcp tool calls followed by a message + assert len(response_3.output) == 4 + mcp_list_tools = [output for output in response_3.output if output.type == "mcp_list_tools"] + mcp_calls = [output for output in response_3.output if output.type == "mcp_call"] + message_outputs = [output for output in response_3.output if output.type == "message"] + assert len(mcp_list_tools) == 1 + assert len(mcp_calls) == 2, f"Expected two mcp calls, got {len(mcp_calls)}" + assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}" + + # Verify we have a valid max_tool_calls field + assert response_3.max_tool_calls == max_tool_calls[1] From 49d6ef8a7032fb40ea0a2504336364fb7acd4712 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 19 Nov 2025 11:01:52 -0800 Subject: [PATCH 07/14] fix(docs): fix glob vulnerability (#4193) add npm override so docs workspace resolves glob@10.5+ --- docs/package-lock.json | 122 +++++++---------------------------------- docs/package.json | 3 + 2 files changed, 22 insertions(+), 103 deletions(-) 
diff --git a/docs/package-lock.json b/docs/package-lock.json index 9a435846f..2a548914c 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -10712,12 +10712,6 @@ "integrity": "sha512-QMUezzXWII9EV5aTFXW1UBVUO77wYPpjqIF8/AviUCThNeSYZykpoTixUeaNNBwmCev0AMDWMAni+f8Hxb1IFw==", "license": "Unlicense" }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "license": "ISC" - }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -10821,21 +10815,20 @@ "license": "ISC" }, "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", "license": "ISC", "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" }, - "engines": { - "node": "*" + "bin": { + "glob": "dist/esm/bin.mjs" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -10859,26 +10852,19 @@ "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", "license": "BSD-2-Clause" }, - "node_modules/glob/node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "license": "ISC", "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/global-dirs": { @@ -11792,17 +11778,6 @@ "node": ">=12" } }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", @@ -15570,15 +15545,6 @@ "node": ">= 0.8" } }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, "node_modules/onetime": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", @@ -15955,15 +15921,6 @@ "node": "^12.20.0 || ^14.13.1 || >=16.0.0" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/path-is-inside": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", @@ -20038,41 +19995,6 @@ "node": ">= 6" } }, - "node_modules/sucrase/node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - 
"node_modules/sucrase/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -21620,12 +21542,6 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "license": "ISC" - }, "node_modules/write-file-atomic": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", diff --git a/docs/package.json b/docs/package.json index d435c65a9..ca4d02ca1 100644 --- a/docs/package.json +++ b/docs/package.json @@ -31,6 +31,9 @@ "react-dom": "^19.0.0", "remark-code-import": "^1.2.0" }, + "overrides": { + "glob": "^10.5.0" + }, "browserslist": { "production": [ ">0.5%", From 88526669821086212affec113d0438324cec3942 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 19 Nov 2025 11:23:33 -0800 Subject: [PATCH 08/14] chore: remove dead code from openai_compat utility (#4194) Removes a bunch of dead code from `openai_compat.py` --- .../utils/inference/openai_compat.py | 209 ------------------ 1 file changed, 209 deletions(-) diff --git a/src/llama_stack/providers/utils/inference/openai_compat.py b/src/llama_stack/providers/utils/inference/openai_compat.py index 32d41ffde..3ce7d361d 100644 --- a/src/llama_stack/providers/utils/inference/openai_compat.py +++ 
b/src/llama_stack/providers/utils/inference/openai_compat.py @@ -3,23 +3,10 @@ # # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from collections.abc import Iterable from typing import ( Any, ) -from openai.types.chat import ( - ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam, -) - -try: - from openai.types.chat import ( - ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall, - ) -except ImportError: - from openai.types.chat.chat_completion_message_tool_call import ( - ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall, - ) from openai.types.chat import ( ChatCompletionMessageToolCall, ) @@ -32,18 +19,6 @@ from llama_stack.models.llama.datatypes import ( ToolCall, ToolDefinition, ) -from llama_stack_api import ( - URL, - GreedySamplingStrategy, - ImageContentItem, - JsonSchemaResponseFormat, - OpenAIResponseFormatParam, - SamplingParams, - TextContentItem, - TopKSamplingStrategy, - TopPSamplingStrategy, - _URLOrData, -) logger = get_logger(name=__name__, category="providers::utils") @@ -73,42 +48,6 @@ class OpenAICompatCompletionResponse(BaseModel): choices: list[OpenAICompatCompletionChoice] -def get_sampling_strategy_options(params: SamplingParams) -> dict: - options = {} - if isinstance(params.strategy, GreedySamplingStrategy): - options["temperature"] = 0.0 - elif isinstance(params.strategy, TopPSamplingStrategy): - if params.strategy.temperature is not None: - options["temperature"] = params.strategy.temperature - if params.strategy.top_p is not None: - options["top_p"] = params.strategy.top_p - elif isinstance(params.strategy, TopKSamplingStrategy): - options["top_k"] = params.strategy.top_k - else: - raise ValueError(f"Unsupported sampling strategy: {params.strategy}") - - return options - - -def get_sampling_options(params: SamplingParams | None) -> dict: - if not params: - return {} - - options = {} - 
if params: - options.update(get_sampling_strategy_options(params)) - if params.max_tokens: - options["max_tokens"] = params.max_tokens - - if params.repetition_penalty is not None and params.repetition_penalty != 1.0: - options["repeat_penalty"] = params.repetition_penalty - - if params.stop is not None: - options["stop"] = params.stop - - return options - - def text_from_choice(choice) -> str: if hasattr(choice, "delta") and choice.delta: return choice.delta.content # type: ignore[no-any-return] # external OpenAI types lack precise annotations @@ -253,154 +192,6 @@ def convert_tooldef_to_openai_tool(tool: ToolDefinition) -> dict: return out -def _convert_stop_reason_to_openai_finish_reason(stop_reason: StopReason) -> str: - """ - Convert a StopReason to an OpenAI chat completion finish_reason. - """ - return { - StopReason.end_of_turn: "stop", - StopReason.end_of_message: "tool_calls", - StopReason.out_of_tokens: "length", - }.get(stop_reason, "stop") - - -def _convert_openai_finish_reason(finish_reason: str) -> StopReason: - """ - Convert an OpenAI chat completion finish_reason to a StopReason. - - finish_reason: Literal["stop", "length", "tool_calls", ...] - - stop: model hit a natural stop point or a provided stop sequence - - length: maximum number of tokens specified in the request was reached - - tool_calls: model called a tool - - -> - - class StopReason(Enum): - end_of_turn = "end_of_turn" - end_of_message = "end_of_message" - out_of_tokens = "out_of_tokens" - """ - - # TODO(mf): are end_of_turn and end_of_message semantics correct? 
- return { - "stop": StopReason.end_of_turn, - "length": StopReason.out_of_tokens, - "tool_calls": StopReason.end_of_message, - }.get(finish_reason, StopReason.end_of_turn) - - -def _convert_openai_request_tools(tools: list[dict[str, Any]] | None = None) -> list[ToolDefinition]: - lls_tools: list[ToolDefinition] = [] - if not tools: - return lls_tools - - for tool in tools: - tool_fn = tool.get("function", {}) - tool_name = tool_fn.get("name", None) - tool_desc = tool_fn.get("description", None) - tool_params = tool_fn.get("parameters", None) - - lls_tool = ToolDefinition( - tool_name=tool_name, - description=tool_desc, - input_schema=tool_params, # Pass through entire JSON Schema - ) - lls_tools.append(lls_tool) - return lls_tools - - -def _convert_openai_request_response_format( - response_format: OpenAIResponseFormatParam | None = None, -): - if not response_format: - return None - # response_format can be a dict or a pydantic model - response_format_dict = dict(response_format) # type: ignore[arg-type] # OpenAIResponseFormatParam union needs dict conversion - if response_format_dict.get("type", "") == "json_schema": - return JsonSchemaResponseFormat( - type="json_schema", # type: ignore[arg-type] # Literal["json_schema"] incompatible with expected type - json_schema=response_format_dict.get("json_schema", {}).get("schema", ""), - ) - return None - - -def _convert_openai_tool_calls( - tool_calls: list[OpenAIChatCompletionMessageFunctionToolCall], -) -> list[ToolCall]: - """ - Convert an OpenAI ChatCompletionMessageToolCall list into a list of ToolCall. - - OpenAI ChatCompletionMessageToolCall: - id: str - function: Function - type: Literal["function"] - - OpenAI Function: - arguments: str - name: str - - -> - - ToolCall: - call_id: str - tool_name: str - arguments: Dict[str, ...] 
- """ - if not tool_calls: - return [] # CompletionMessage tool_calls is not optional - - return [ - ToolCall( - call_id=call.id, - tool_name=call.function.name, - arguments=call.function.arguments, - ) - for call in tool_calls - ] - - -def _convert_openai_sampling_params( - max_tokens: int | None = None, - temperature: float | None = None, - top_p: float | None = None, -) -> SamplingParams: - sampling_params = SamplingParams() - - if max_tokens: - sampling_params.max_tokens = max_tokens - - # Map an explicit temperature of 0 to greedy sampling - if temperature == 0: - sampling_params.strategy = GreedySamplingStrategy() - else: - # OpenAI defaults to 1.0 for temperature and top_p if unset - if temperature is None: - temperature = 1.0 - if top_p is None: - top_p = 1.0 - sampling_params.strategy = TopPSamplingStrategy(temperature=temperature, top_p=top_p) # type: ignore[assignment] # SamplingParams.strategy union accepts this type - - return sampling_params - - -def openai_content_to_content(content: str | Iterable[OpenAIChatCompletionContentPartParam] | None): - if content is None: - return "" - if isinstance(content, str): - return content - elif isinstance(content, list): - return [openai_content_to_content(c) for c in content] - elif hasattr(content, "type"): - if content.type == "text": - return TextContentItem(type="text", text=content.text) # type: ignore[attr-defined] # Iterable narrowed by hasattr check but mypy doesn't track - elif content.type == "image_url": - return ImageContentItem(type="image", image=_URLOrData(url=URL(uri=content.image_url.url))) # type: ignore[attr-defined] # Iterable narrowed by hasattr check but mypy doesn't track - else: - raise ValueError(f"Unknown content type: {content.type}") - else: - raise ValueError(f"Unknown content type: {content}") - - async def prepare_openai_completion_params(**params): async def _prepare_value(value: Any) -> Any: new_value = value From 0757d5a9170011f691c955438736726e6b5fd70e Mon Sep 17 00:00:00 2001 
From: Ian Miller <75687988+r3v5@users.noreply.github.com> Date: Wed, 19 Nov 2025 19:48:11 +0000 Subject: [PATCH 09/14] feat(responses)!: implement support for OpenAI compatible prompts in Responses API (#3965) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What does this PR do? This PR is responsible for providing actual implementation of OpenAI compatible prompts in Responses API. This is the follow up PR with actual implementation after introducing #3942 The need of this functionality was initiated in #3514. > Note, https://github.com/llamastack/llama-stack/pull/3514 is divided on three separate PRs. Current PR is the third of three. Closes #3321 ## Test Plan Manual testing, CI workflow with added unit tests Comprehensive manual testing with new implementation: **Test Prompts with Images with text on them in Responses API:** I used this image for testing purposes: [iphone 17 image](https://github.com/user-attachments/assets/9e2ee821-e394-4bbd-b1c8-d48a3fa315de) 1. Upload an image: ``` curl -X POST http://localhost:8321/v1/files \ -H "Content-Type: multipart/form-data" \ -F "file=@/Users/ianmiller/iphone.jpeg" \ -F "purpose=assistants" ``` `{"object":"file","id":"file-d6d375f238e14f21952cc40246bc8504","bytes":556241,"created_at":1761750049,"expires_at":1793286049,"filename":"iphone.jpeg","purpose":"assistants"}%` 2. Create prompt: ``` curl -X POST http://localhost:8321/v1/prompts \ -H "Content-Type: application/json" \ -d '{ "prompt": "You are a product analysis expert. Analyze the following product:\n\nProduct Name: {{product_name}}\nDescription: {{description}}\n\nImage: {{product_photo}}\n\nProvide a detailed analysis including quality assessment, target audience, and pricing recommendations.", "variables": ["product_name", "description", "product_photo"] }' ``` `{"prompt":"You are a product analysis expert. 
Analyze the following product:\n\nProduct Name: {{product_name}}\nDescription: {{description}}\n\nImage: {{product_photo}}\n\nProvide a detailed analysis including quality assessment, target audience, and pricing recommendations.","version":1,"prompt_id":"pmpt_7be2208cb82cdbc35356354dae1f335d1e9b7baeca21ea62","variables":["product_name","description","product_photo"],"is_default":false}%` 3. Create response: ``` curl -X POST http://localhost:8321/v1/responses \ -H "Accept: application/json, text/event-stream" \ -H "Content-Type: application/json" \ -d '{ "input": "Please analyze this product", "model": "openai/gpt-4o", "store": true, "prompt": { "id": "pmpt_7be2208cb82cdbc35356354dae1f335d1e9b7baeca21ea62", "version": "1", "variables": { "product_name": { "type": "input_text", "text": "iPhone 17 Pro Max" }, "product_photo": { "type": "input_image", "file_id": "file-d6d375f238e14f21952cc40246bc8504", "detail": "high" } } } }' ``` `{"created_at":1761750427,"error":null,"id":"resp_f897f914-e3b8-4783-8223-3ed0d32fcbc6","model":"openai/gpt-4o","object":"response","output":[{"content":[{"text":"### Product Analysis: iPhone 17 Pro Max\n\n**Quality Assessment:**\n\n- **Display & Design:**\n - The 6.9-inch display is large, ideal for streaming and productivity.\n - Anti-reflective technology and 120Hz refresh rate enhance viewing experience, providing smoother visuals and reducing glare.\n - Titanium frame suggests a premium build, offering durability and a sleek appearance.\n\n- **Performance:**\n - The Apple A19 Pro chip promises significant performance improvements, likely leading to faster processing and efficient multitasking.\n - 12GB RAM is substantial for a smartphone, ensuring smooth operation for demanding apps and games.\n\n- **Camera System:**\n - The triple 48MP camera setup (wide, ultra-wide, telephoto) is designed for versatile photography needs, capturing high-resolution photos and videos.\n - The 24MP front camera will appeal to selfie enthusiasts and 
content creators needing quality front-facing shots.\n\n- **Connectivity:**\n - Wi-Fi 7 support indicates future-proof wireless capabilities, providing faster and more reliable internet connectivity.\n\n**Target Audience:**\n\n- **Tech Enthusiasts:** Individuals interested in cutting-edge technology and performance.\n- **Content Creators:** Users who need a robust camera system for photo and video production.\n- **Luxury Consumers:** Those who prefer premium materials and top-of-the-line specs.\n- **Professionals:** Users who require efficient multitasking and productivity features.\n\n**Pricing Recommendations:**\n\n- Given the premium specifications, a higher price point is expected. Consider pricing competitively within the high-end smartphone market while justifying cost through unique features like the titanium frame and advanced connectivity options.\n- Positioning around the $1,200 to $1,500 range would align with expectations for top-tier devices, catering to its target audience while ensuring profitability.\n\nOverall, the iPhone 17 Pro Max showcases a blend of innovative features and premium design, aimed at users seeking high performance and superior aesthetics.","type":"output_text","annotations":[]}],"role":"assistant","type":"message","id":"msg_66f4d844-4d9e-4102-80fc-eb75b34b6dbd","status":"completed"}],"parallel_tool_calls":false,"previous_response_id":null,"prompt":{"id":"pmpt_7be2208cb82cdbc35356354dae1f335d1e9b7baeca21ea62","variables":{"product_name":{"text":"iPhone 17 Pro Max","type":"input_text"},"product_photo":{"detail":"high","type":"input_image","file_id":"file-d6d375f238e14f21952cc40246bc8504","image_url":null}},"version":"1"},"status":"completed","temperature":null,"text":{"format":{"type":"text"}},"top_p":null,"tools":[],"truncation":null,"usage":{"input_tokens":830,"output_tokens":394,"total_tokens":1224,"input_tokens_details":{"cached_tokens":0},"output_tokens_details":{"reasoning_tokens":0}},"instructions":null}%` **Test Prompts with 
PDF files in Responses API:** I used this PDF file for testing purposes: [invoicesample.pdf](https://github.com/user-attachments/files/22958943/invoicesample.pdf) 1. Upload PDF: ``` curl -X POST http://localhost:8321/v1/files \ -H "Content-Type: multipart/form-data" \ -F "file=@/Users/ianmiller/invoicesample.pdf" \ -F "purpose=assistants" ``` `{"object":"file","id":"file-7fbb1043a4bb468cab60ffe4b8631d8e","bytes":149568,"created_at":1761750730,"expires_at":1793286730,"filename":"invoicesample.pdf","purpose":"assistants"}%` 2. Create prompt: ``` curl -X POST http://localhost:8321/v1/prompts \ -H "Content-Type: application/json" \ -d '{ "prompt": "You are an accounting and financial analysis expert. Analyze the following invoice document:\n\nInvoice Document: {{invoice_doc}}\n\nProvide a comprehensive analysis", "variables": ["invoice_doc"] }' ``` `{"prompt":"You are an accounting and financial analysis expert. Analyze the following invoice document:\n\nInvoice Document: {{invoice_doc}}\n\nProvide a comprehensive analysis","version":1,"prompt_id":"pmpt_72e2a184a86f32a568b6afb5455dca5c16bf3cc3f80092dc","variables":["invoice_doc"],"is_default":false}%` 3. 
Create response: ``` curl -X POST http://localhost:8321/v1/responses \ -H "Content-Type: application/json" \ -d '{ "input": "Please provide a detailed analysis of this invoice", "model": "openai/gpt-4o", "store": true, "prompt": { "id": "pmpt_72e2a184a86f32a568b6afb5455dca5c16bf3cc3f80092dc", "version": "1", "variables": { "invoice_doc": { "type": "input_file", "file_id": "file-7fbb1043a4bb468cab60ffe4b8631d8e", "filename": "invoicesample.pdf" } } } }' ``` `{"created_at":1761750881,"error":null,"id":"resp_da866913-db06-4702-8000-174daed9dbbb","model":"openai/gpt-4o","object":"response","output":[{"content":[{"text":"Here's a detailed analysis of the invoice provided:\n\n### Seller Information\n- **Business Name:** The invoice features a logo with \"Sunny Farm\" indicating the business identity.\n- **Address:** 123 Somewhere St, Melbourne VIC 3000\n- **Contact Information:** Phone number (03) 1234 5678\n\n### Buyer Information\n- **Name:** Denny Gunawan\n- **Address:** 221 Queen St, Melbourne VIC 3000\n\n### Transaction Details\n- **Invoice Number:** #20130304\n- **Date of Transaction:** Not explicitly mentioned, likely inferred from the invoice number or needs clarification.\n\n### Items Purchased\n1. **Apple**\n - Price: $5.00/kg\n - Quantity: 1 kg\n - Subtotal: $5.00\n\n2. **Orange**\n - Price: $1.99/kg\n - Quantity: 2 kg\n - Subtotal: $3.98\n\n3. **Watermelon**\n - Price: $1.69/kg\n - Quantity: 3 kg\n - Subtotal: $5.07\n\n4. **Mango**\n - Price: $9.56/kg\n - Quantity: 2 kg\n - Subtotal: $19.12\n\n5. **Peach**\n - Price: $2.99/kg\n - Quantity: 1 kg\n - Subtotal: $2.99\n\n### Financial Summary\n- **Subtotal for Items:** $36.00\n- **GST (Goods and Services Tax):** 10% of $36.00, which amounts to $3.60\n- **Total Amount Due:** $39.60\n\n### Notes\n- The invoice includes a placeholder text: \"Lorem ipsum dolor sit amet...\" which is typically used as filler text. 
This might indicate a section intended for terms, conditions, or additional notes that haven’t been completed.\n\n### Visual and Design Elements\n- The invoice uses a simple and clear layout, featuring the business logo prominently and stating essential information such as contact and transaction details in a structured manner.\n- There is a \"Thank You\" note at the bottom, which adds a professional and courteous touch.\n\n### Considerations\n- Ensure the date of the transaction is clear if there are any future references needed.\n- Replace filler text with relevant terms and conditions or any special instructions pertaining to the transaction.\n\nThis invoice appears standard, representing a small business transaction with clearly itemized products and applicable taxes.","type":"output_text","annotations":[]}],"role":"assistant","type":"message","id":"msg_39f3b39e-4684-4444-8e4d-e7395f88c9dc","status":"completed"}],"parallel_tool_calls":false,"previous_response_id":null,"prompt":{"id":"pmpt_72e2a184a86f32a568b6afb5455dca5c16bf3cc3f80092dc","variables":{"invoice_doc":{"type":"input_file","file_data":null,"file_id":"file-7fbb1043a4bb468cab60ffe4b8631d8e","file_url":null,"filename":"invoicesample.pdf"}},"version":"1"},"status":"completed","temperature":null,"text":{"format":{"type":"text"}},"top_p":null,"tools":[],"truncation":null,"usage":{"input_tokens":529,"output_tokens":513,"total_tokens":1042,"input_tokens_details":{"cached_tokens":0},"output_tokens_details":{"reasoning_tokens":0}},"instructions":null}%` **Test simple text Prompt in Responses API:** 1. Create prompt: ``` curl -X POST http://localhost:8321/v1/prompts \ -H "Content-Type: application/json" \ -d '{ "prompt": "Hello {{name}}! You are working at {{company}}. Your role is {{role}} at {{company}}. Remember, {{name}}, to be {{tone}}.", "variables": ["name", "company", "role", "tone"] }' ``` `{"prompt":"Hello {{name}}! You are working at {{company}}. Your role is {{role}} at {{company}}. 
Remember, {{name}}, to be {{tone}}.","version":1,"prompt_id":"pmpt_f340a3164a4f65d975c774ffe38ea42d15e7ce4a835919ef","variables":["name","company","role","tone"],"is_default":false}%` 2. Create response: ``` curl -X POST http://localhost:8321/v1/responses \ -H "Accept: application/json, text/event-stream" \ -H "Content-Type: application/json" \ -d '{ "input": "What is the capital of Ireland?", "model": "openai/gpt-4o", "store": true, "prompt": { "id": "pmpt_f340a3164a4f65d975c774ffe38ea42d15e7ce4a835919ef", "version": "1", "variables": { "name": { "type": "input_text", "text": "Alice" }, "company": { "type": "input_text", "text": "Dummy Company" }, "role": { "type": "input_text", "text": "Geography expert" }, "tone": { "type": "input_text", "text": "professional and helpful" } } } }' ``` `{"created_at":1761751097,"error":null,"id":"resp_1b037b95-d9ae-4ad0-8e76-d953897ecaef","model":"openai/gpt-4o","object":"response","output":[{"content":[{"text":"The capital of Ireland is Dublin.","type":"output_text","annotations":[]}],"role":"assistant","type":"message","id":"msg_8e7c72b6-2aa2-4da6-8e57-da4e12fa3ce2","status":"completed"}],"parallel_tool_calls":false,"previous_response_id":null,"prompt":{"id":"pmpt_f340a3164a4f65d975c774ffe38ea42d15e7ce4a835919ef","variables":{"name":{"text":"Alice","type":"input_text"},"company":{"text":"Dummy Company","type":"input_text"},"role":{"text":"Geography expert","type":"input_text"},"tone":{"text":"professional and helpful","type":"input_text"}},"version":"1"},"status":"completed","temperature":null,"text":{"format":{"type":"text"}},"top_p":null,"tools":[],"truncation":null,"usage":{"input_tokens":47,"output_tokens":7,"total_tokens":54,"input_tokens_details":{"cached_tokens":0},"output_tokens_details":{"reasoning_tokens":0}},"instructions":null}%` --- .../inline/agents/meta_reference/__init__.py | 4 +- .../inline/agents/meta_reference/agents.py | 9 +- .../responses/openai_responses.py | 98 +++- 
.../agents/meta_reference/responses/utils.py | 123 ++++- src/llama_stack/providers/registry/agents.py | 2 + .../meta_reference/test_openai_responses.py | 517 +++++++++++++++++- .../test_openai_responses_conversations.py | 4 + .../test_response_conversion_utils.py | 20 +- .../test_responses_safety_utils.py | 2 + .../meta_reference/test_safety_optional.py | 8 + 10 files changed, 770 insertions(+), 17 deletions(-) diff --git a/src/llama_stack/providers/inline/agents/meta_reference/__init__.py b/src/llama_stack/providers/inline/agents/meta_reference/__init__.py index b3fb814e3..9683baf00 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/__init__.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/__init__.py @@ -27,8 +27,10 @@ async def get_provider_impl( deps[Api.tool_runtime], deps[Api.tool_groups], deps[Api.conversations], - policy, + deps[Api.prompts], + deps[Api.files], telemetry_enabled, + policy, ) await impl.initialize() return impl diff --git a/src/llama_stack/providers/inline/agents/meta_reference/agents.py b/src/llama_stack/providers/inline/agents/meta_reference/agents.py index 2d5aa6c04..ca419a51a 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/agents.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/agents.py @@ -12,6 +12,7 @@ from llama_stack.providers.utils.responses.responses_store import ResponsesStore from llama_stack_api import ( Agents, Conversations, + Files, Inference, ListOpenAIResponseInputItem, ListOpenAIResponseObject, @@ -22,6 +23,7 @@ from llama_stack_api import ( OpenAIResponsePrompt, OpenAIResponseText, Order, + Prompts, ResponseGuardrail, Safety, ToolGroups, @@ -45,6 +47,8 @@ class MetaReferenceAgentsImpl(Agents): tool_runtime_api: ToolRuntime, tool_groups_api: ToolGroups, conversations_api: Conversations, + prompts_api: Prompts, + files_api: Files, policy: list[AccessRule], telemetry_enabled: bool = False, ): @@ -56,7 +60,8 @@ class MetaReferenceAgentsImpl(Agents): 
self.tool_groups_api = tool_groups_api self.conversations_api = conversations_api self.telemetry_enabled = telemetry_enabled - + self.prompts_api = prompts_api + self.files_api = files_api self.in_memory_store = InmemoryKVStoreImpl() self.openai_responses_impl: OpenAIResponsesImpl | None = None self.policy = policy @@ -73,6 +78,8 @@ class MetaReferenceAgentsImpl(Agents): vector_io_api=self.vector_io_api, safety_api=self.safety_api, conversations_api=self.conversations_api, + prompts_api=self.prompts_api, + files_api=self.files_api, ) async def shutdown(self) -> None: diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py index 11bfb1417..c8282df69 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py @@ -4,6 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
+import re import time import uuid from collections.abc import AsyncIterator @@ -18,13 +19,17 @@ from llama_stack.providers.utils.responses.responses_store import ( from llama_stack_api import ( ConversationItem, Conversations, + Files, Inference, InvalidConversationIdError, ListOpenAIResponseInputItem, ListOpenAIResponseObject, + OpenAIChatCompletionContentPartParam, OpenAIDeleteResponseObject, OpenAIMessageParam, OpenAIResponseInput, + OpenAIResponseInputMessageContentFile, + OpenAIResponseInputMessageContentImage, OpenAIResponseInputMessageContentText, OpenAIResponseInputTool, OpenAIResponseMessage, @@ -34,7 +39,9 @@ from llama_stack_api import ( OpenAIResponseText, OpenAIResponseTextFormat, OpenAISystemMessageParam, + OpenAIUserMessageParam, Order, + Prompts, ResponseGuardrailSpec, Safety, ToolGroups, @@ -46,6 +53,7 @@ from .streaming import StreamingResponseOrchestrator from .tool_executor import ToolExecutor from .types import ChatCompletionContext, ToolContext from .utils import ( + convert_response_content_to_chat_content, convert_response_input_to_chat_messages, convert_response_text_to_chat_response_format, extract_guardrail_ids, @@ -69,6 +77,8 @@ class OpenAIResponsesImpl: vector_io_api: VectorIO, # VectorIO safety_api: Safety | None, conversations_api: Conversations, + prompts_api: Prompts, + files_api: Files, ): self.inference_api = inference_api self.tool_groups_api = tool_groups_api @@ -82,6 +92,8 @@ class OpenAIResponsesImpl: tool_runtime_api=tool_runtime_api, vector_io_api=vector_io_api, ) + self.prompts_api = prompts_api + self.files_api = files_api async def _prepend_previous_response( self, @@ -122,11 +134,13 @@ class OpenAIResponsesImpl: # Use stored messages directly and convert only new input message_adapter = TypeAdapter(list[OpenAIMessageParam]) messages = message_adapter.validate_python(previous_response.messages) - new_messages = await convert_response_input_to_chat_messages(input, previous_messages=messages) + new_messages = await 
convert_response_input_to_chat_messages( + input, previous_messages=messages, files_api=self.files_api + ) messages.extend(new_messages) else: # Backward compatibility: reconstruct from inputs - messages = await convert_response_input_to_chat_messages(all_input) + messages = await convert_response_input_to_chat_messages(all_input, files_api=self.files_api) tool_context.recover_tools_from_previous_response(previous_response) elif conversation is not None: @@ -138,7 +152,7 @@ class OpenAIResponsesImpl: all_input = input if not conversation_items.data: # First turn - just convert the new input - messages = await convert_response_input_to_chat_messages(input) + messages = await convert_response_input_to_chat_messages(input, files_api=self.files_api) else: if not stored_messages: all_input = conversation_items.data @@ -154,14 +168,82 @@ class OpenAIResponsesImpl: all_input = input messages = stored_messages or [] - new_messages = await convert_response_input_to_chat_messages(all_input, previous_messages=messages) + new_messages = await convert_response_input_to_chat_messages( + all_input, previous_messages=messages, files_api=self.files_api + ) messages.extend(new_messages) else: all_input = input - messages = await convert_response_input_to_chat_messages(all_input) + messages = await convert_response_input_to_chat_messages(all_input, files_api=self.files_api) return all_input, messages, tool_context + async def _prepend_prompt( + self, + messages: list[OpenAIMessageParam], + openai_response_prompt: OpenAIResponsePrompt | None, + ) -> None: + """Prepend prompt template to messages, resolving text/image/file variables. 
+ + :param messages: List of OpenAIMessageParam objects + :param openai_response_prompt: (Optional) OpenAIResponsePrompt object with variables + :returns: string of utf-8 characters + """ + if not openai_response_prompt or not openai_response_prompt.id: + return + + prompt_version = int(openai_response_prompt.version) if openai_response_prompt.version else None + cur_prompt = await self.prompts_api.get_prompt(openai_response_prompt.id, prompt_version) + + if not cur_prompt or not cur_prompt.prompt: + return + + cur_prompt_text = cur_prompt.prompt + cur_prompt_variables = cur_prompt.variables + + if not openai_response_prompt.variables: + messages.insert(0, OpenAISystemMessageParam(content=cur_prompt_text)) + return + + # Validate that all provided variables exist in the prompt + for name in openai_response_prompt.variables.keys(): + if name not in cur_prompt_variables: + raise ValueError(f"Variable {name} not found in prompt {openai_response_prompt.id}") + + # Separate text and media variables + text_substitutions = {} + media_content_parts: list[OpenAIChatCompletionContentPartParam] = [] + + for name, value in openai_response_prompt.variables.items(): + # Text variable found + if isinstance(value, OpenAIResponseInputMessageContentText): + text_substitutions[name] = value.text + + # Media variable found + elif isinstance(value, OpenAIResponseInputMessageContentImage | OpenAIResponseInputMessageContentFile): + converted_parts = await convert_response_content_to_chat_content([value], files_api=self.files_api) + if isinstance(converted_parts, list): + media_content_parts.extend(converted_parts) + + # Eg: {{product_photo}} becomes "[Image: product_photo]" + # This gives the model textual context about what media exists in the prompt + var_type = value.type.replace("input_", "").replace("_", " ").title() + text_substitutions[name] = f"[{var_type}: {name}]" + + def replace_variable(match: re.Match[str]) -> str: + var_name = match.group(1).strip() + return 
str(text_substitutions.get(var_name, match.group(0))) + + pattern = r"\{\{\s*(\w+)\s*\}\}" + processed_prompt_text = re.sub(pattern, replace_variable, cur_prompt_text) + + # Insert system message with resolved text + messages.insert(0, OpenAISystemMessageParam(content=processed_prompt_text)) + + # If we have media, create a new user message because allows to ingest images and files + if media_content_parts: + messages.append(OpenAIUserMessageParam(content=media_content_parts)) + async def get_openai_response( self, response_id: str, @@ -297,6 +379,7 @@ class OpenAIResponsesImpl: input=input, conversation=conversation, model=model, + prompt=prompt, instructions=instructions, previous_response_id=previous_response_id, store=store, @@ -350,6 +433,7 @@ class OpenAIResponsesImpl: instructions: str | None = None, previous_response_id: str | None = None, conversation: str | None = None, + prompt: OpenAIResponsePrompt | None = None, store: bool | None = True, temperature: float | None = None, text: OpenAIResponseText | None = None, @@ -372,6 +456,9 @@ class OpenAIResponsesImpl: if instructions: messages.insert(0, OpenAISystemMessageParam(content=instructions)) + # Prepend reusable prompt (if provided) + await self._prepend_prompt(messages, prompt) + # Structured outputs response_format = await convert_response_text_to_chat_response_format(text) @@ -394,6 +481,7 @@ class OpenAIResponsesImpl: ctx=ctx, response_id=response_id, created_at=created_at, + prompt=prompt, text=text, max_infer_iters=max_infer_iters, parallel_tool_calls=parallel_tool_calls, diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py index 25460bcfe..7bbf6bd30 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/utils.py @@ -5,11 +5,14 @@ # the root directory of this source tree. 
import asyncio +import base64 +import mimetypes import re import uuid from collections.abc import Sequence from llama_stack_api import ( + Files, OpenAIAssistantMessageParam, OpenAIChatCompletionContentPartImageParam, OpenAIChatCompletionContentPartParam, @@ -18,6 +21,8 @@ from llama_stack_api import ( OpenAIChatCompletionToolCallFunction, OpenAIChoice, OpenAIDeveloperMessageParam, + OpenAIFile, + OpenAIFileFile, OpenAIImageURL, OpenAIJSONSchema, OpenAIMessageParam, @@ -29,6 +34,7 @@ from llama_stack_api import ( OpenAIResponseInput, OpenAIResponseInputFunctionToolCallOutput, OpenAIResponseInputMessageContent, + OpenAIResponseInputMessageContentFile, OpenAIResponseInputMessageContentImage, OpenAIResponseInputMessageContentText, OpenAIResponseInputTool, @@ -37,9 +43,11 @@ from llama_stack_api import ( OpenAIResponseMessage, OpenAIResponseOutputMessageContent, OpenAIResponseOutputMessageContentOutputText, + OpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseOutputMessageFunctionToolCall, OpenAIResponseOutputMessageMCPCall, OpenAIResponseOutputMessageMCPListTools, + OpenAIResponseOutputMessageWebSearchToolCall, OpenAIResponseText, OpenAISystemMessageParam, OpenAIToolMessageParam, @@ -49,6 +57,46 @@ from llama_stack_api import ( ) +async def extract_bytes_from_file(file_id: str, files_api: Files) -> bytes: + """ + Extract raw bytes from file using the Files API. 
+ + :param file_id: The file identifier (e.g., "file-abc123") + :param files_api: Files API instance + :returns: Raw file content as bytes + :raises: ValueError if file cannot be retrieved + """ + try: + response = await files_api.openai_retrieve_file_content(file_id) + return bytes(response.body) + except Exception as e: + raise ValueError(f"Failed to retrieve file content for file_id '{file_id}': {str(e)}") from e + + +def generate_base64_ascii_text_from_bytes(raw_bytes: bytes) -> str: + """ + Converts raw binary bytes into a safe ASCII text representation for URLs + + :param raw_bytes: the actual bytes that represents file content + :returns: string of utf-8 characters + """ + return base64.b64encode(raw_bytes).decode("utf-8") + + +def construct_data_url(ascii_text: str, mime_type: str | None) -> str: + """ + Construct data url with decoded data inside + + :param ascii_text: ASCII content + :param mime_type: MIME type of file + :returns: data url string (eg. data:image/png,base64,%3Ch1%3EHello%2C%20World%21%3C%2Fh1%3E) + """ + if not mime_type: + mime_type = "application/octet-stream" + + return f"data:{mime_type};base64,{ascii_text}" + + async def convert_chat_choice_to_response_message( choice: OpenAIChoice, citation_files: dict[str, str] | None = None, @@ -78,11 +126,15 @@ async def convert_chat_choice_to_response_message( async def convert_response_content_to_chat_content( content: str | Sequence[OpenAIResponseInputMessageContent | OpenAIResponseOutputMessageContent], + files_api: Files | None, ) -> str | list[OpenAIChatCompletionContentPartParam]: """ Convert the content parts from an OpenAI Response API request into OpenAI Chat Completion content parts. The content schemas of each API look similar, but are not exactly the same. 
+ + :param content: The content to convert + :param files_api: Files API for resolving file_id to raw file content (required if content contains files/images) """ if isinstance(content, str): return content @@ -95,9 +147,68 @@ async def convert_response_content_to_chat_content( elif isinstance(content_part, OpenAIResponseOutputMessageContentOutputText): converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part.text)) elif isinstance(content_part, OpenAIResponseInputMessageContentImage): + detail = content_part.detail + image_mime_type = None if content_part.image_url: - image_url = OpenAIImageURL(url=content_part.image_url, detail=content_part.detail) + image_url = OpenAIImageURL(url=content_part.image_url, detail=detail) converted_parts.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url)) + elif content_part.file_id: + if files_api is None: + raise ValueError("file_ids are not supported by this implementation of the Stack") + image_file_response = await files_api.openai_retrieve_file(content_part.file_id) + if image_file_response.filename: + image_mime_type, _ = mimetypes.guess_type(image_file_response.filename) + raw_image_bytes = await extract_bytes_from_file(content_part.file_id, files_api) + ascii_text = generate_base64_ascii_text_from_bytes(raw_image_bytes) + image_data_url = construct_data_url(ascii_text, image_mime_type) + image_url = OpenAIImageURL(url=image_data_url, detail=detail) + converted_parts.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url)) + else: + raise ValueError( + f"Image content must have either 'image_url' or 'file_id'. 
" + f"Got image_url={content_part.image_url}, file_id={content_part.file_id}" + ) + elif isinstance(content_part, OpenAIResponseInputMessageContentFile): + resolved_file_data = None + file_data = content_part.file_data + file_id = content_part.file_id + file_url = content_part.file_url + filename = content_part.filename + file_mime_type = None + if not any([file_data, file_id, file_url]): + raise ValueError( + f"File content must have at least one of 'file_data', 'file_id', or 'file_url'. " + f"Got file_data={file_data}, file_id={file_id}, file_url={file_url}" + ) + if file_id: + if files_api is None: + raise ValueError("file_ids are not supported by this implementation of the Stack") + + file_response = await files_api.openai_retrieve_file(file_id) + if not filename: + filename = file_response.filename + file_mime_type, _ = mimetypes.guess_type(file_response.filename) + raw_file_bytes = await extract_bytes_from_file(file_id, files_api) + ascii_text = generate_base64_ascii_text_from_bytes(raw_file_bytes) + resolved_file_data = construct_data_url(ascii_text, file_mime_type) + elif file_data: + if file_data.startswith("data:"): + resolved_file_data = file_data + else: + # Raw base64 data, wrap in data URL format + if filename: + file_mime_type, _ = mimetypes.guess_type(filename) + resolved_file_data = construct_data_url(file_data, file_mime_type) + elif file_url: + resolved_file_data = file_url + converted_parts.append( + OpenAIFile( + file=OpenAIFileFile( + file_data=resolved_file_data, + filename=filename, + ) + ) + ) elif isinstance(content_part, str): converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part)) else: @@ -110,12 +221,14 @@ async def convert_response_content_to_chat_content( async def convert_response_input_to_chat_messages( input: str | list[OpenAIResponseInput], previous_messages: list[OpenAIMessageParam] | None = None, + files_api: Files | None = None, ) -> list[OpenAIMessageParam]: """ Convert the input from an OpenAI 
Response API request into OpenAI Chat Completion messages. :param input: The input to convert :param previous_messages: Optional previous messages to check for function_call references + :param files_api: Files API for resolving file_id to raw file content (optional, required for file/image content) """ messages: list[OpenAIMessageParam] = [] if isinstance(input, list): @@ -169,6 +282,12 @@ async def convert_response_input_to_chat_messages( elif isinstance(input_item, OpenAIResponseOutputMessageMCPListTools): # the tool list will be handled separately pass + elif isinstance( + input_item, + OpenAIResponseOutputMessageWebSearchToolCall | OpenAIResponseOutputMessageFileSearchToolCall, + ): + # these tool calls are tracked internally but not converted to chat messages + pass elif isinstance(input_item, OpenAIResponseMCPApprovalRequest) or isinstance( input_item, OpenAIResponseMCPApprovalResponse ): @@ -176,7 +295,7 @@ async def convert_response_input_to_chat_messages( pass elif isinstance(input_item, OpenAIResponseMessage): # Narrow type to OpenAIResponseMessage which has content and role attributes - content = await convert_response_content_to_chat_content(input_item.content) + content = await convert_response_content_to_chat_content(input_item.content, files_api) message_type = await get_message_type_by_role(input_item.role) if message_type is None: raise ValueError( diff --git a/src/llama_stack/providers/registry/agents.py b/src/llama_stack/providers/registry/agents.py index e85be99d6..22bb45faf 100644 --- a/src/llama_stack/providers/registry/agents.py +++ b/src/llama_stack/providers/registry/agents.py @@ -34,6 +34,8 @@ def available_providers() -> list[ProviderSpec]: Api.tool_runtime, Api.tool_groups, Api.conversations, + Api.prompts, + Api.files, ], optional_api_dependencies=[ Api.safety, diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses.py b/tests/unit/providers/agents/meta_reference/test_openai_responses.py index 256df6baf..97bccbfe4 
100644 --- a/tests/unit/providers/agents/meta_reference/test_openai_responses.py +++ b/tests/unit/providers/agents/meta_reference/test_openai_responses.py @@ -25,6 +25,13 @@ from llama_stack.providers.utils.responses.responses_store import ( ResponsesStore, _OpenAIResponseObjectWithInputAndMessages, ) +from llama_stack_api import ( + OpenAIChatCompletionContentPartImageParam, + OpenAIFile, + OpenAIFileObject, + OpenAISystemMessageParam, + Prompt, +) from llama_stack_api.agents import Order from llama_stack_api.inference import ( OpenAIAssistantMessageParam, @@ -38,6 +45,8 @@ from llama_stack_api.inference import ( ) from llama_stack_api.openai_responses import ( ListOpenAIResponseInputItem, + OpenAIResponseInputMessageContentFile, + OpenAIResponseInputMessageContentImage, OpenAIResponseInputMessageContentText, OpenAIResponseInputToolFunction, OpenAIResponseInputToolMCP, @@ -47,6 +56,7 @@ from llama_stack_api.openai_responses import ( OpenAIResponseOutputMessageFunctionToolCall, OpenAIResponseOutputMessageMCPCall, OpenAIResponseOutputMessageWebSearchToolCall, + OpenAIResponsePrompt, OpenAIResponseText, OpenAIResponseTextFormat, WebSearchToolTypes, @@ -98,6 +108,19 @@ def mock_safety_api(): return safety_api +@pytest.fixture +def mock_prompts_api(): + prompts_api = AsyncMock() + return prompts_api + + +@pytest.fixture +def mock_files_api(): + """Mock files API for testing.""" + files_api = AsyncMock() + return files_api + + @pytest.fixture def openai_responses_impl( mock_inference_api, @@ -107,6 +130,8 @@ def openai_responses_impl( mock_vector_io_api, mock_safety_api, mock_conversations_api, + mock_prompts_api, + mock_files_api, ): return OpenAIResponsesImpl( inference_api=mock_inference_api, @@ -116,6 +141,8 @@ def openai_responses_impl( vector_io_api=mock_vector_io_api, safety_api=mock_safety_api, conversations_api=mock_conversations_api, + prompts_api=mock_prompts_api, + files_api=mock_files_api, ) @@ -499,7 +526,7 @@ async def 
test_create_openai_response_with_tool_call_function_arguments_none(ope mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall() -async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api): +async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api, mock_files_api): """Test creating an OpenAI response with multiple messages.""" # Setup input_messages = [ @@ -710,7 +737,7 @@ async def test_create_openai_response_with_instructions(openai_responses_impl, m async def test_create_openai_response_with_instructions_and_multiple_messages( - openai_responses_impl, mock_inference_api + openai_responses_impl, mock_inference_api, mock_files_api ): # Setup input_messages = [ @@ -1242,3 +1269,489 @@ async def test_create_openai_response_with_output_types_as_input( assert stored_with_outputs.input == input_with_output_types assert len(stored_with_outputs.input) == 3 + + +async def test_create_openai_response_with_prompt(openai_responses_impl, mock_inference_api, mock_prompts_api): + """Test creating an OpenAI response with a prompt.""" + input_text = "What is the capital of Ireland?" + model = "meta-llama/Llama-3.1-8B-Instruct" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="You are a helpful {{ area_name }} assistant at {{ company_name }}. 
Always provide accurate information.", + prompt_id=prompt_id, + version=1, + variables=["area_name", "company_name"], + is_default=True, + ) + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "area_name": OpenAIResponseInputMessageContentText(text="geography"), + "company_name": OpenAIResponseInputMessageContentText(text="Dummy Company"), + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + mock_inference_api.openai_chat_completion.return_value = fake_stream() + + result = await openai_responses_impl.create_openai_response( + input=input_text, + model=model, + prompt=openai_response_prompt, + ) + + mock_prompts_api.get_prompt.assert_called_with(prompt_id, 1) + mock_inference_api.openai_chat_completion.assert_called() + call_args = mock_inference_api.openai_chat_completion.call_args + sent_messages = call_args.args[0].messages + assert len(sent_messages) == 2 + + system_messages = [msg for msg in sent_messages if msg.role == "system"] + assert len(system_messages) == 1 + assert ( + system_messages[0].content + == "You are a helpful geography assistant at Dummy Company. Always provide accurate information." + ) + + user_messages = [msg for msg in sent_messages if msg.role == "user"] + assert len(user_messages) == 1 + assert user_messages[0].content == input_text + + assert result.model == model + assert result.status == "completed" + assert isinstance(result.prompt, OpenAIResponsePrompt) + assert result.prompt.id == prompt_id + assert result.prompt.variables == openai_response_prompt.variables + assert result.prompt.version == "1" + + +async def test_prepend_prompt_successful_without_variables(openai_responses_impl, mock_prompts_api, mock_inference_api): + """Test prepend_prompt function without variables.""" + input_text = "What is the capital of Ireland?" 
+ model = "meta-llama/Llama-3.1-8B-Instruct" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="You are a helpful assistant. Always provide accurate information.", + prompt_id=prompt_id, + version=1, + variables=[], + is_default=True, + ) + + openai_response_prompt = OpenAIResponsePrompt(id=prompt_id, version="1") + + mock_prompts_api.get_prompt.return_value = prompt + mock_inference_api.openai_chat_completion.return_value = fake_stream() + + await openai_responses_impl.create_openai_response( + input=input_text, + model=model, + prompt=openai_response_prompt, + ) + + mock_prompts_api.get_prompt.assert_called_with(prompt_id, 1) + mock_inference_api.openai_chat_completion.assert_called() + call_args = mock_inference_api.openai_chat_completion.call_args + sent_messages = call_args.args[0].messages + assert len(sent_messages) == 2 + system_messages = [msg for msg in sent_messages if msg.role == "system"] + assert system_messages[0].content == "You are a helpful assistant. Always provide accurate information." 
+ + +async def test_prepend_prompt_invalid_variable(openai_responses_impl, mock_prompts_api): + """Test error handling in prepend_prompt function when prompt parameters contain invalid variables.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="You are a {{ role }} assistant.", + prompt_id=prompt_id, + version=1, + variables=["role"], # Only "role" is valid + is_default=True, + ) + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "role": OpenAIResponseInputMessageContentText(text="helpful"), + "company": OpenAIResponseInputMessageContentText( + text="Dummy Company" + ), # company is not in prompt.variables + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + + # Initial messages + messages = [OpenAIUserMessageParam(content="Test prompt")] + + # Execute - should raise ValueError for invalid variable + with pytest.raises(ValueError, match="Variable company not found in prompt"): + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + # Verify + mock_prompts_api.get_prompt.assert_called_once_with(prompt_id, 1) + + +async def test_prepend_prompt_not_found(openai_responses_impl, mock_prompts_api): + """Test prepend_prompt function when prompt is not found.""" + prompt_id = "pmpt_nonexistent" + openai_response_prompt = OpenAIResponsePrompt(id=prompt_id, version="1") + + mock_prompts_api.get_prompt.return_value = None # Prompt not found + + # Initial messages + messages = [OpenAIUserMessageParam(content="Test prompt")] + initial_length = len(messages) + + # Execute + result = await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + # Verify + mock_prompts_api.get_prompt.assert_called_once_with(prompt_id, 1) + + # Should return None when prompt not found + assert result is None + + # Messages should not be modified + assert len(messages) == initial_length + assert messages[0].content == "Test prompt" + + +async 
def test_prepend_prompt_variable_substitution(openai_responses_impl, mock_prompts_api): + """Test complex variable substitution with multiple occurrences and special characters in prepend_prompt function.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + + # Support all whitespace variations: {{name}}, {{ name }}, {{ name}}, {{name }}, etc. + prompt = Prompt( + prompt="Hello {{name}}! You are working at {{ company}}. Your role is {{role}} at {{company}}. Remember, {{ name }}, to be {{ tone }}.", + prompt_id=prompt_id, + version=1, + variables=["name", "company", "role", "tone"], + is_default=True, + ) + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "name": OpenAIResponseInputMessageContentText(text="Alice"), + "company": OpenAIResponseInputMessageContentText(text="Dummy Company"), + "role": OpenAIResponseInputMessageContentText(text="AI Assistant"), + "tone": OpenAIResponseInputMessageContentText(text="professional"), + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + + # Initial messages + messages = [OpenAIUserMessageParam(content="Test")] + + # Execute + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + # Verify + assert len(messages) == 2 + assert isinstance(messages[0], OpenAISystemMessageParam) + expected_content = "Hello Alice! You are working at Dummy Company. Your role is AI Assistant at Dummy Company. Remember, Alice, to be professional." 
+ assert messages[0].content == expected_content + + +async def test_prepend_prompt_with_image_variable(openai_responses_impl, mock_prompts_api, mock_files_api): + """Test prepend_prompt with image variable - should create placeholder in system message and append image as separate user message.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="Analyze this {{product_image}} and describe what you see.", + prompt_id=prompt_id, + version=1, + variables=["product_image"], + is_default=True, + ) + + # Mock file content and file metadata + mock_file_content = b"fake_image_data" + mock_files_api.openai_retrieve_file_content.return_value = type("obj", (object,), {"body": mock_file_content})() + mock_files_api.openai_retrieve_file.return_value = OpenAIFileObject( + object="file", + id="file-abc123", + bytes=len(mock_file_content), + created_at=1234567890, + expires_at=1234567890, + filename="product.jpg", + purpose="assistants", + ) + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "product_image": OpenAIResponseInputMessageContentImage( + file_id="file-abc123", + detail="high", + ) + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + + # Initial messages + messages = [OpenAIUserMessageParam(content="What do you think?")] + + # Execute + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + assert len(messages) == 3 + + # Check system message has placeholder + assert isinstance(messages[0], OpenAISystemMessageParam) + assert messages[0].content == "Analyze this [Image: product_image] and describe what you see." + + # Check original user message is still there + assert isinstance(messages[1], OpenAIUserMessageParam) + assert messages[1].content == "What do you think?" 
+ + # Check new user message with image is appended + assert isinstance(messages[2], OpenAIUserMessageParam) + assert isinstance(messages[2].content, list) + assert len(messages[2].content) == 1 + + # Should be image with data URL + assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam) + assert messages[2].content[0].image_url.url.startswith("data:image/") + assert messages[2].content[0].image_url.detail == "high" + + +async def test_prepend_prompt_with_file_variable(openai_responses_impl, mock_prompts_api, mock_files_api): + """Test prepend_prompt with file variable - should create placeholder in system message and append file as separate user message.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="Review the document {{contract_file}} and summarize key points.", + prompt_id=prompt_id, + version=1, + variables=["contract_file"], + is_default=True, + ) + + # Mock file retrieval + mock_file_content = b"fake_pdf_content" + mock_files_api.openai_retrieve_file_content.return_value = type("obj", (object,), {"body": mock_file_content})() + mock_files_api.openai_retrieve_file.return_value = OpenAIFileObject( + object="file", + id="file-contract-789", + bytes=len(mock_file_content), + created_at=1234567890, + expires_at=1234567890, + filename="contract.pdf", + purpose="assistants", + ) + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "contract_file": OpenAIResponseInputMessageContentFile( + file_id="file-contract-789", + filename="contract.pdf", + ) + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + + # Initial messages + messages = [OpenAIUserMessageParam(content="Please review this.")] + + # Execute + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + assert len(messages) == 3 + + # Check system message has placeholder + assert isinstance(messages[0], OpenAISystemMessageParam) + assert 
messages[0].content == "Review the document [File: contract_file] and summarize key points." + + # Check original user message is still there + assert isinstance(messages[1], OpenAIUserMessageParam) + assert messages[1].content == "Please review this." + + # Check new user message with file is appended + assert isinstance(messages[2], OpenAIUserMessageParam) + assert isinstance(messages[2].content, list) + assert len(messages[2].content) == 1 + + # First part should be file with data URL + assert isinstance(messages[2].content[0], OpenAIFile) + assert messages[2].content[0].file.file_data.startswith("data:application/pdf;base64,") + assert messages[2].content[0].file.filename == "contract.pdf" + assert messages[2].content[0].file.file_id is None + + +async def test_prepend_prompt_with_mixed_variables(openai_responses_impl, mock_prompts_api, mock_files_api): + """Test prepend_prompt with text, image, and file variables mixed together.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="Hello {{name}}! Analyze {{photo}} and review {{document}}. 
Provide insights for {{company}}.", + prompt_id=prompt_id, + version=1, + variables=["name", "photo", "document", "company"], + is_default=True, + ) + + # Mock file retrieval for image and file + mock_image_content = b"fake_image_data" + mock_file_content = b"fake_doc_content" + + async def mock_retrieve_file_content(file_id): + if file_id == "file-photo-123": + return type("obj", (object,), {"body": mock_image_content})() + elif file_id == "file-doc-456": + return type("obj", (object,), {"body": mock_file_content})() + + mock_files_api.openai_retrieve_file_content.side_effect = mock_retrieve_file_content + + def mock_retrieve_file(file_id): + if file_id == "file-photo-123": + return OpenAIFileObject( + object="file", + id="file-photo-123", + bytes=len(mock_image_content), + created_at=1234567890, + expires_at=1234567890, + filename="photo.jpg", + purpose="assistants", + ) + elif file_id == "file-doc-456": + return OpenAIFileObject( + object="file", + id="file-doc-456", + bytes=len(mock_file_content), + created_at=1234567890, + expires_at=1234567890, + filename="doc.pdf", + purpose="assistants", + ) + + mock_files_api.openai_retrieve_file.side_effect = mock_retrieve_file + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "name": OpenAIResponseInputMessageContentText(text="Alice"), + "photo": OpenAIResponseInputMessageContentImage(file_id="file-photo-123", detail="auto"), + "document": OpenAIResponseInputMessageContentFile(file_id="file-doc-456", filename="doc.pdf"), + "company": OpenAIResponseInputMessageContentText(text="Acme Corp"), + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + + # Initial messages + messages = [OpenAIUserMessageParam(content="Here's my question.")] + + # Execute + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + assert len(messages) == 3 + + # Check system message has text and placeholders + assert isinstance(messages[0], OpenAISystemMessageParam) 
+ expected_system = "Hello Alice! Analyze [Image: photo] and review [File: document]. Provide insights for Acme Corp." + assert messages[0].content == expected_system + + # Check original user message is still there + assert isinstance(messages[1], OpenAIUserMessageParam) + assert messages[1].content == "Here's my question." + + # Check new user message with media is appended (2 media items) + assert isinstance(messages[2], OpenAIUserMessageParam) + assert isinstance(messages[2].content, list) + assert len(messages[2].content) == 2 + + # First part should be image with data URL + assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam) + assert messages[2].content[0].image_url.url.startswith("data:image/") + + # Second part should be file with data URL + assert isinstance(messages[2].content[1], OpenAIFile) + assert messages[2].content[1].file.file_data.startswith("data:application/pdf;base64,") + assert messages[2].content[1].file.filename == "doc.pdf" + assert messages[2].content[1].file.file_id is None + + +async def test_prepend_prompt_with_image_using_image_url(openai_responses_impl, mock_prompts_api): + """Test prepend_prompt with image variable using image_url instead of file_id.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="Describe {{screenshot}}.", + prompt_id=prompt_id, + version=1, + variables=["screenshot"], + is_default=True, + ) + + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={ + "screenshot": OpenAIResponseInputMessageContentImage( + image_url="https://example.com/screenshot.png", + detail="low", + ) + }, + ) + + mock_prompts_api.get_prompt.return_value = prompt + + # Initial messages + messages = [OpenAIUserMessageParam(content="What is this?")] + + # Execute + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) + + assert len(messages) == 3 + + # Check system message has placeholder + assert 
isinstance(messages[0], OpenAISystemMessageParam) + assert messages[0].content == "Describe [Image: screenshot]." + + # Check original user message is still there + assert isinstance(messages[1], OpenAIUserMessageParam) + assert messages[1].content == "What is this?" + + # Check new user message with image is appended + assert isinstance(messages[2], OpenAIUserMessageParam) + assert isinstance(messages[2].content, list) + + # Image should use the provided URL + assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam) + assert messages[2].content[0].image_url.url == "https://example.com/screenshot.png" + assert messages[2].content[0].image_url.detail == "low" + + +async def test_prepend_prompt_image_variable_missing_required_fields(openai_responses_impl, mock_prompts_api): + """Test prepend_prompt with image variable that has neither file_id nor image_url - should raise error.""" + prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef" + prompt = Prompt( + prompt="Analyze {{bad_image}}.", + prompt_id=prompt_id, + version=1, + variables=["bad_image"], + is_default=True, + ) + + # Create image content with neither file_id nor image_url + openai_response_prompt = OpenAIResponsePrompt( + id=prompt_id, + version="1", + variables={"bad_image": OpenAIResponseInputMessageContentImage()}, # No file_id or image_url + ) + + mock_prompts_api.get_prompt.return_value = prompt + messages = [OpenAIUserMessageParam(content="Test")] + + # Execute - should raise ValueError + with pytest.raises(ValueError, match="Image content must have either 'image_url' or 'file_id'"): + await openai_responses_impl._prepend_prompt(messages, openai_response_prompt) diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py index fa1ddae78..5a3e6bf21 100644 --- a/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py 
+++ b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py @@ -39,6 +39,8 @@ def responses_impl_with_conversations( mock_vector_io_api, mock_conversations_api, mock_safety_api, + mock_prompts_api, + mock_files_api, ): """Create OpenAIResponsesImpl instance with conversations API.""" return OpenAIResponsesImpl( @@ -49,6 +51,8 @@ def responses_impl_with_conversations( vector_io_api=mock_vector_io_api, conversations_api=mock_conversations_api, safety_api=mock_safety_api, + prompts_api=mock_prompts_api, + files_api=mock_files_api, ) diff --git a/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py b/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py index b7a437686..e496a96e3 100644 --- a/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py +++ b/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py @@ -5,6 +5,8 @@ # the root directory of this source tree. +from unittest.mock import AsyncMock + import pytest from llama_stack.providers.inline.agents.meta_reference.responses.utils import ( @@ -46,6 +48,12 @@ from llama_stack_api.openai_responses import ( ) +@pytest.fixture +def mock_files_api(): + """Mock files API for testing.""" + return AsyncMock() + + class TestConvertChatChoiceToResponseMessage: async def test_convert_string_content(self): choice = OpenAIChoice( @@ -78,17 +86,17 @@ class TestConvertChatChoiceToResponseMessage: class TestConvertResponseContentToChatContent: - async def test_convert_string_content(self): - result = await convert_response_content_to_chat_content("Simple string") + async def test_convert_string_content(self, mock_files_api): + result = await convert_response_content_to_chat_content("Simple string", mock_files_api) assert result == "Simple string" - async def test_convert_text_content_parts(self): + async def test_convert_text_content_parts(self, mock_files_api): content = [ 
OpenAIResponseInputMessageContentText(text="First part"), OpenAIResponseOutputMessageContentOutputText(text="Second part"), ] - result = await convert_response_content_to_chat_content(content) + result = await convert_response_content_to_chat_content(content, mock_files_api) assert len(result) == 2 assert isinstance(result[0], OpenAIChatCompletionContentPartTextParam) @@ -96,10 +104,10 @@ class TestConvertResponseContentToChatContent: assert isinstance(result[1], OpenAIChatCompletionContentPartTextParam) assert result[1].text == "Second part" - async def test_convert_image_content(self): + async def test_convert_image_content(self, mock_files_api): content = [OpenAIResponseInputMessageContentImage(image_url="https://example.com/image.jpg", detail="high")] - result = await convert_response_content_to_chat_content(content) + result = await convert_response_content_to_chat_content(content, mock_files_api) assert len(result) == 1 assert isinstance(result[0], OpenAIChatCompletionContentPartImageParam) diff --git a/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py b/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py index d4d1b872a..a914bbef4 100644 --- a/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py +++ b/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py @@ -30,6 +30,8 @@ def mock_apis(): "vector_io_api": AsyncMock(), "conversations_api": AsyncMock(), "safety_api": AsyncMock(), + "prompts_api": AsyncMock(), + "files_api": AsyncMock(), } diff --git a/tests/unit/providers/agents/meta_reference/test_safety_optional.py b/tests/unit/providers/agents/meta_reference/test_safety_optional.py index b48d38b29..c2311b68f 100644 --- a/tests/unit/providers/agents/meta_reference/test_safety_optional.py +++ b/tests/unit/providers/agents/meta_reference/test_safety_optional.py @@ -52,6 +52,8 @@ def mock_deps(): tool_runtime_api = AsyncMock() tool_groups_api = AsyncMock() conversations_api = 
AsyncMock() + prompts_api = AsyncMock() + files_api = AsyncMock() return { Api.inference: inference_api, @@ -59,6 +61,8 @@ def mock_deps(): Api.tool_runtime: tool_runtime_api, Api.tool_groups: tool_groups_api, Api.conversations: conversations_api, + Api.prompts: prompts_api, + Api.files: files_api, } @@ -144,6 +148,8 @@ class TestGuardrailsFunctionality: vector_io_api=mock_deps[Api.vector_io], safety_api=None, # No Safety API conversations_api=mock_deps[Api.conversations], + prompts_api=mock_deps[Api.prompts], + files_api=mock_deps[Api.files], ) # Test with string guardrail @@ -191,6 +197,8 @@ class TestGuardrailsFunctionality: vector_io_api=mock_deps[Api.vector_io], safety_api=None, # No Safety API conversations_api=mock_deps[Api.conversations], + prompts_api=mock_deps[Api.prompts], + files_api=mock_deps[Api.files], ) # Should not raise when no guardrails requested From aa2a7dae07d7ecd9213c3ab4f7fc9fa19eed22cc Mon Sep 17 00:00:00 2001 From: Sam El-Borai Date: Wed, 19 Nov 2025 20:53:20 +0100 Subject: [PATCH 10/14] chore(ci): make stainless workflow more DRY (#4195) # What does this PR do? 
Addresses feedback from https://github.com/llamastack/llama-stack/pull/4187#discussion_r2542797437 ## Test Plan --- .github/workflows/stainless-builds.yml | 88 +++++++++++--------------- 1 file changed, 38 insertions(+), 50 deletions(-) diff --git a/.github/workflows/stainless-builds.yml b/.github/workflows/stainless-builds.yml index a18c70887..28869fdd8 100644 --- a/.github/workflows/stainless-builds.yml +++ b/.github/workflows/stainless-builds.yml @@ -43,7 +43,41 @@ env: # Stainless organization dashboard jobs: + compute-branch: + runs-on: ubuntu-latest + outputs: + preview_branch: ${{ steps.compute.outputs.preview_branch }} + base_branch: ${{ steps.compute.outputs.base_branch }} + merge_branch: ${{ steps.compute.outputs.merge_branch }} + steps: + - name: Compute branch names + id: compute + run: | + HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}" + BASE_REPO="${{ github.repository }}" + BRANCH_NAME="${{ github.event.pull_request.head.ref }}" + FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}" + + if [ "$HEAD_REPO" != "$BASE_REPO" ]; then + # Fork PR: prefix with fork owner for isolation + if [ -z "$FORK_OWNER" ]; then + echo "Error: Fork PR detected but fork owner is empty" >&2 + exit 1 + fi + PREVIEW_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}" + BASE_BRANCH="preview/base/${FORK_OWNER}/${BRANCH_NAME}" + else + # Same-repo PR + PREVIEW_BRANCH="preview/${BRANCH_NAME}" + BASE_BRANCH="preview/base/${BRANCH_NAME}" + fi + + echo "preview_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT + echo "base_branch=${BASE_BRANCH}" >> $GITHUB_OUTPUT + echo "merge_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT + preview: + needs: compute-branch if: github.event.action != 'closed' runs-on: ubuntu-latest permissions: @@ -59,32 +93,6 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 2 - # Compute the Stainless branch name, prefixing with fork owner if PR is from a fork. 
- # For fork PRs like "contributor:fix/issue-123", this creates "preview/contributor/fix/issue-123" - # For same-repo PRs, this creates "preview/fix/issue-123" - - name: Compute branch names - id: branch-names - run: | - HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}" - BASE_REPO="${{ github.repository }}" - BRANCH_NAME="${{ github.event.pull_request.head.ref }}" - - if [ "$HEAD_REPO" != "$BASE_REPO" ]; then - # Fork PR: prefix with fork owner for isolation - FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}" - PREVIEW_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}" - BASE_BRANCH="preview/base/${FORK_OWNER}/${BRANCH_NAME}" - else - # Same-repo PR - PREVIEW_BRANCH="preview/${BRANCH_NAME}" - BASE_BRANCH="preview/base/${BRANCH_NAME}" - fi - - echo "preview_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT - echo "base_branch=${BASE_BRANCH}" >> $GITHUB_OUTPUT - - # This action builds preview SDKs from the OpenAPI spec changes and - # posts/updates a comment on the PR with build results and links to the preview. - name: Run preview builds uses: stainless-api/upload-openapi-spec-action/preview@32823b096b4319c53ee948d702d9052873af485f # 1.6.0 with: @@ -97,10 +105,11 @@ jobs: base_sha: ${{ github.event.pull_request.base.sha }} base_ref: ${{ github.event.pull_request.base.ref }} head_sha: ${{ github.event.pull_request.head.sha }} - branch: ${{ steps.branch-names.outputs.preview_branch }} - base_branch: ${{ steps.branch-names.outputs.base_branch }} + branch: ${{ needs.compute-branch.outputs.preview_branch }} + base_branch: ${{ needs.compute-branch.outputs.base_branch }} merge: + needs: compute-branch if: github.event.action == 'closed' && github.event.pull_request.merged == true runs-on: ubuntu-latest permissions: @@ -116,27 +125,6 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 2 - # Compute the Stainless branch name, prefixing with fork owner if PR is from a fork. 
- # For fork PRs like "contributor:fix/issue-123", this creates "preview/contributor/fix/issue-123" - # For same-repo PRs, this creates "preview/fix/issue-123" - - name: Compute branch names - id: branch-names - run: | - HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}" - BASE_REPO="${{ github.repository }}" - BRANCH_NAME="${{ github.event.pull_request.head.ref }}" - - if [ "$HEAD_REPO" != "$BASE_REPO" ]; then - # Fork PR: prefix with fork owner for isolation - FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}" - MERGE_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}" - else - # Same-repo PR - MERGE_BRANCH="preview/${BRANCH_NAME}" - fi - - echo "merge_branch=${MERGE_BRANCH}" >> $GITHUB_OUTPUT - # Note that this only merges in changes that happened on the last build on # the computed preview branch. It's possible that there are OAS/config # changes that haven't been built, if the preview job didn't finish @@ -155,4 +143,4 @@ jobs: base_sha: ${{ github.event.pull_request.base.sha }} base_ref: ${{ github.event.pull_request.base.ref }} head_sha: ${{ github.event.pull_request.head.sha }} - merge_branch: ${{ steps.branch-names.outputs.merge_branch }} + merge_branch: ${{ needs.compute-branch.outputs.merge_branch }} From b6ce2428083fd3ec46f4422473dbc2512b835e66 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 19 Nov 2025 13:43:11 -0800 Subject: [PATCH 11/14] chore: update code owners (#4199) Update code owners given changed affiliations, projects, etc. --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8fff470f6..418d3113a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,4 +2,4 @@ # These owners will be the default owners for everything in # the repo. 
Unless a later match takes precedence, -* @ashwinb @yanxi0830 @hardikjshah @raghotham @ehhuang @leseb @bbrowning @reluctantfuturist @mattf @slekkala1 @franciscojavierarceo +* @ashwinb @raghotham @ehhuang @leseb @bbrowning @mattf @franciscojavierarceo From d649c3663e3293a86cf1e9d4d83c91cba7032857 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 19 Nov 2025 14:49:44 -0800 Subject: [PATCH 12/14] fix: enforce allowed_models during inference requests (#4197) The `allowed_models` configuration was only being applied when listing models via the `/v1/models` endpoint, but the actual inference requests weren't checking this restriction. This meant users could directly request any model the provider supports by specifying it in their inference call, completely bypassing the intended cost controls. The fix adds validation to all three inference methods (chat completions, completions, and embeddings) that checks the requested model against the allowed_models list before making the provider API call. ### Test plan Added unit tests --- .../providers/utils/inference/openai_mixin.py | 28 ++++- .../utils/inference/test_openai_mixin.py | 102 +++++++++++++++++- 2 files changed, 126 insertions(+), 4 deletions(-) diff --git a/src/llama_stack/providers/utils/inference/openai_mixin.py b/src/llama_stack/providers/utils/inference/openai_mixin.py index 559ac90ce..30511a341 100644 --- a/src/llama_stack/providers/utils/inference/openai_mixin.py +++ b/src/llama_stack/providers/utils/inference/openai_mixin.py @@ -213,6 +213,19 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel): return api_key + def _validate_model_allowed(self, provider_model_id: str) -> None: + """ + Validate that the model is in the allowed_models list if configured. 
+ + :param provider_model_id: The provider-specific model ID to validate + :raises ValueError: If the model is not in the allowed_models list + """ + if self.config.allowed_models is not None and provider_model_id not in self.config.allowed_models: + raise ValueError( + f"Model '{provider_model_id}' is not in the allowed models list. " + f"Allowed models: {self.config.allowed_models}" + ) + async def _get_provider_model_id(self, model: str) -> str: """ Get the provider-specific model ID from the model store. @@ -259,8 +272,11 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel): Direct OpenAI completion API call. """ # TODO: fix openai_completion to return type compatible with OpenAI's API response + provider_model_id = await self._get_provider_model_id(params.model) + self._validate_model_allowed(provider_model_id) + completion_kwargs = await prepare_openai_completion_params( - model=await self._get_provider_model_id(params.model), + model=provider_model_id, prompt=params.prompt, best_of=params.best_of, echo=params.echo, @@ -292,6 +308,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel): """ Direct OpenAI chat completion API call. """ + provider_model_id = await self._get_provider_model_id(params.model) + self._validate_model_allowed(provider_model_id) + messages = params.messages if self.download_images: @@ -313,7 +332,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel): messages = [await _localize_image_url(m) for m in messages] request_params = await prepare_openai_completion_params( - model=await self._get_provider_model_id(params.model), + model=provider_model_id, messages=messages, frequency_penalty=params.frequency_penalty, function_call=params.function_call, @@ -351,10 +370,13 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel): """ Direct OpenAI embeddings API call. 
""" + provider_model_id = await self._get_provider_model_id(params.model) + self._validate_model_allowed(provider_model_id) + # Build request params conditionally to avoid NotGiven/Omit type mismatch # The OpenAI SDK uses Omit in signatures but NOT_GIVEN has type NotGiven request_params: dict[str, Any] = { - "model": await self._get_provider_model_id(params.model), + "model": provider_model_id, "input": params.input, } if params.encoding_format is not None: diff --git a/tests/unit/providers/utils/inference/test_openai_mixin.py b/tests/unit/providers/utils/inference/test_openai_mixin.py index 5b13a75f4..02d44f2ba 100644 --- a/tests/unit/providers/utils/inference/test_openai_mixin.py +++ b/tests/unit/providers/utils/inference/test_openai_mixin.py @@ -15,7 +15,14 @@ from pydantic import BaseModel, Field from llama_stack.core.request_headers import request_provider_data_context from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin -from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam +from llama_stack_api import ( + Model, + ModelType, + OpenAIChatCompletionRequestWithExtraBody, + OpenAICompletionRequestWithExtraBody, + OpenAIEmbeddingsRequestWithExtraBody, + OpenAIUserMessageParam, +) class OpenAIMixinImpl(OpenAIMixin): @@ -834,3 +841,96 @@ class TestOpenAIMixinProviderDataApiKey: error_message = str(exc_info.value) assert "test_api_key" in error_message assert "x-llamastack-provider-data" in error_message + + +class TestOpenAIMixinAllowedModelsInference: + """Test cases for allowed_models enforcement during inference requests""" + + async def test_inference_with_allowed_models(self, mixin, mock_client_context): + """Test that all inference methods succeed with allowed models""" + mixin.config.allowed_models = ["gpt-4", "text-davinci-003", "text-embedding-ada-002"] + + mock_client = MagicMock() + 
mock_client.chat.completions.create = AsyncMock(return_value=MagicMock()) + mock_client.completions.create = AsyncMock(return_value=MagicMock()) + mock_embedding_response = MagicMock() + mock_embedding_response.data = [MagicMock(embedding=[0.1, 0.2, 0.3])] + mock_embedding_response.usage = MagicMock(prompt_tokens=5, total_tokens=5) + mock_client.embeddings.create = AsyncMock(return_value=mock_embedding_response) + + with mock_client_context(mixin, mock_client): + # Test chat completion + await mixin.openai_chat_completion( + OpenAIChatCompletionRequestWithExtraBody( + model="gpt-4", messages=[OpenAIUserMessageParam(role="user", content="Hello")] + ) + ) + mock_client.chat.completions.create.assert_called_once() + + # Test completion + await mixin.openai_completion( + OpenAICompletionRequestWithExtraBody(model="text-davinci-003", prompt="Hello") + ) + mock_client.completions.create.assert_called_once() + + # Test embeddings + await mixin.openai_embeddings( + OpenAIEmbeddingsRequestWithExtraBody(model="text-embedding-ada-002", input="test text") + ) + mock_client.embeddings.create.assert_called_once() + + async def test_inference_with_disallowed_models(self, mixin, mock_client_context): + """Test that all inference methods fail with disallowed models""" + mixin.config.allowed_models = ["gpt-4"] + + mock_client = MagicMock() + + with mock_client_context(mixin, mock_client): + # Test chat completion with disallowed model + with pytest.raises(ValueError, match="Model 'gpt-4-turbo' is not in the allowed models list"): + await mixin.openai_chat_completion( + OpenAIChatCompletionRequestWithExtraBody( + model="gpt-4-turbo", messages=[OpenAIUserMessageParam(role="user", content="Hello")] + ) + ) + + # Test completion with disallowed model + with pytest.raises(ValueError, match="Model 'text-davinci-002' is not in the allowed models list"): + await mixin.openai_completion( + OpenAICompletionRequestWithExtraBody(model="text-davinci-002", prompt="Hello") + ) + + # Test 
embeddings with disallowed model + with pytest.raises(ValueError, match="Model 'text-embedding-3-large' is not in the allowed models list"): + await mixin.openai_embeddings( + OpenAIEmbeddingsRequestWithExtraBody(model="text-embedding-3-large", input="test text") + ) + + mock_client.chat.completions.create.assert_not_called() + mock_client.completions.create.assert_not_called() + mock_client.embeddings.create.assert_not_called() + + async def test_inference_with_no_restrictions(self, mixin, mock_client_context): + """Test that inference succeeds when allowed_models is None or empty list blocks all""" + # Test with None (no restrictions) + assert mixin.config.allowed_models is None + mock_client = MagicMock() + mock_client.chat.completions.create = AsyncMock(return_value=MagicMock()) + + with mock_client_context(mixin, mock_client): + await mixin.openai_chat_completion( + OpenAIChatCompletionRequestWithExtraBody( + model="any-model", messages=[OpenAIUserMessageParam(role="user", content="Hello")] + ) + ) + mock_client.chat.completions.create.assert_called_once() + + # Test with empty list (blocks all models) + mixin.config.allowed_models = [] + with mock_client_context(mixin, mock_client): + with pytest.raises(ValueError, match="Model 'gpt-4' is not in the allowed models list"): + await mixin.openai_chat_completion( + OpenAIChatCompletionRequestWithExtraBody( + model="gpt-4", messages=[OpenAIUserMessageParam(role="user", content="Hello")] + ) + ) From acf74cb8df904b16612dbdca4819b2db9b2bb64d Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 19 Nov 2025 16:25:30 -0800 Subject: [PATCH 13/14] feat(ci): add --typescript-only flag to skip Python tests in integration test script (#4201) This adds a `--typescript-only` flag to `scripts/integration-tests.sh` that skips pytest execution entirely while still starting the Llama Stack server (required for TS client tests). The TypeScript client can now be tested independently without Python test dependencies. 
--- scripts/integration-tests.sh | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh index 20ecd0c4d..2adef892d 100755 --- a/scripts/integration-tests.sh +++ b/scripts/integration-tests.sh @@ -20,6 +20,7 @@ TEST_PATTERN="" INFERENCE_MODE="replay" EXTRA_PARAMS="" COLLECT_ONLY=false +TYPESCRIPT_ONLY=false # Function to display usage usage() { @@ -34,6 +35,7 @@ Options: --subdirs STRING Comma-separated list of test subdirectories to run (overrides suite) --pattern STRING Regex pattern to pass to pytest -k --collect-only Collect tests only without running them (skips server startup) + --typescript-only Skip Python tests and run only TypeScript client tests --help Show this help message Suites are defined in tests/integration/suites.py and define which tests to run. @@ -90,6 +92,10 @@ while [[ $# -gt 0 ]]; do COLLECT_ONLY=true shift ;; + --typescript-only) + TYPESCRIPT_ONLY=true + shift + ;; --help) usage exit 0 @@ -544,16 +550,23 @@ if [[ -n "$STACK_CONFIG" ]]; then STACK_CONFIG_ARG="--stack-config=$STACK_CONFIG" fi -pytest -s -v $PYTEST_TARGET \ - $STACK_CONFIG_ARG \ - --inference-mode="$INFERENCE_MODE" \ - -k "$PYTEST_PATTERN" \ - $EXTRA_PARAMS \ - --color=yes \ - --embedding-model=sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \ - --color=yes $EXTRA_PARAMS \ - --capture=tee-sys -exit_code=$? +# Run Python tests unless typescript-only mode +if [[ "$TYPESCRIPT_ONLY" == "false" ]]; then + pytest -s -v $PYTEST_TARGET \ + $STACK_CONFIG_ARG \ + --inference-mode="$INFERENCE_MODE" \ + -k "$PYTEST_PATTERN" \ + $EXTRA_PARAMS \ + --color=yes \ + --embedding-model=sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \ + --color=yes $EXTRA_PARAMS \ + --capture=tee-sys + exit_code=$? 
+else + echo "Skipping Python tests (--typescript-only mode)" + exit_code=0 +fi + set +x set -e From dc4665af179e83b8f93d5e3a004e5751761e55a5 Mon Sep 17 00:00:00 2001 From: Ken Dreyer Date: Fri, 21 Nov 2025 09:48:05 -0500 Subject: [PATCH 14/14] feat!: change bedrock bearer token env variable to match AWS docs & boto3 convention (#4152) Rename `AWS_BEDROCK_API_KEY` to `AWS_BEARER_TOKEN_BEDROCK` to align with the naming convention used in AWS Bedrock documentation and the AWS web console UI. This reduces confusion when developers compare LLS docs with AWS docs. Closes #4147 --- docs/docs/providers/inference/remote_bedrock.mdx | 2 +- .../distributions/ci-tests/run-with-postgres-store.yaml | 2 +- src/llama_stack/distributions/ci-tests/run.yaml | 2 +- .../distributions/starter-gpu/run-with-postgres-store.yaml | 2 +- src/llama_stack/distributions/starter-gpu/run.yaml | 2 +- .../distributions/starter/run-with-postgres-store.yaml | 2 +- src/llama_stack/distributions/starter/run.yaml | 2 +- .../providers/remote/inference/bedrock/bedrock.py | 4 ++-- .../providers/remote/inference/bedrock/config.py | 6 +++--- tests/unit/providers/inference/test_bedrock_adapter.py | 4 ++-- tests/unit/providers/inference/test_bedrock_config.py | 4 ++-- 11 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/docs/providers/inference/remote_bedrock.mdx b/docs/docs/providers/inference/remote_bedrock.mdx index 86bef3000..0b36ea01a 100644 --- a/docs/docs/providers/inference/remote_bedrock.mdx +++ b/docs/docs/providers/inference/remote_bedrock.mdx @@ -22,6 +22,6 @@ AWS Bedrock inference provider using OpenAI compatible endpoint. 
## Sample Configuration ```yaml -api_key: ${env.AWS_BEDROCK_API_KEY:=} +api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} ``` diff --git a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml index d942c23a4..7721138c7 100644 --- a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml +++ b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml @@ -47,7 +47,7 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: - api_key: ${env.AWS_BEDROCK_API_KEY:=} + api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia diff --git a/src/llama_stack/distributions/ci-tests/run.yaml b/src/llama_stack/distributions/ci-tests/run.yaml index 8b1cd2bb2..b791e1488 100644 --- a/src/llama_stack/distributions/ci-tests/run.yaml +++ b/src/llama_stack/distributions/ci-tests/run.yaml @@ -47,7 +47,7 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: - api_key: ${env.AWS_BEDROCK_API_KEY:=} + api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia diff --git a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml index 75cc9d188..9c250c05a 100644 --- a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +++ b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml @@ -47,7 +47,7 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: - api_key: ${env.AWS_BEDROCK_API_KEY:=} + api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: 
remote::nvidia diff --git a/src/llama_stack/distributions/starter-gpu/run.yaml b/src/llama_stack/distributions/starter-gpu/run.yaml index 09c7be5a1..65f9ae326 100644 --- a/src/llama_stack/distributions/starter-gpu/run.yaml +++ b/src/llama_stack/distributions/starter-gpu/run.yaml @@ -47,7 +47,7 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: - api_key: ${env.AWS_BEDROCK_API_KEY:=} + api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia diff --git a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml index f59c809d2..3314bb9e9 100644 --- a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml +++ b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml @@ -47,7 +47,7 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: - api_key: ${env.AWS_BEDROCK_API_KEY:=} + api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia diff --git a/src/llama_stack/distributions/starter/run.yaml b/src/llama_stack/distributions/starter/run.yaml index 435bb22a7..e88539e6a 100644 --- a/src/llama_stack/distributions/starter/run.yaml +++ b/src/llama_stack/distributions/starter/run.yaml @@ -47,7 +47,7 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: - api_key: ${env.AWS_BEDROCK_API_KEY:=} + api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} - provider_id: ${env.NVIDIA_API_KEY:+nvidia} provider_type: remote::nvidia diff --git a/src/llama_stack/providers/remote/inference/bedrock/bedrock.py b/src/llama_stack/providers/remote/inference/bedrock/bedrock.py index 70ee95916..451549db8 100644 --- 
a/src/llama_stack/providers/remote/inference/bedrock/bedrock.py +++ b/src/llama_stack/providers/remote/inference/bedrock/bedrock.py @@ -37,7 +37,7 @@ class BedrockInferenceAdapter(OpenAIMixin): """ config: BedrockConfig - provider_data_api_key_field: str = "aws_bedrock_api_key" + provider_data_api_key_field: str = "aws_bearer_token_bedrock" def get_base_url(self) -> str: """Get base URL for OpenAI client.""" @@ -111,7 +111,7 @@ class BedrockInferenceAdapter(OpenAIMixin): logger.error(f"AWS Bedrock authentication token expired: {error_msg}") raise ValueError( "AWS Bedrock authentication failed: Bearer token has expired. " - "The AWS_BEDROCK_API_KEY environment variable contains an expired pre-signed URL. " + "The AWS_BEARER_TOKEN_BEDROCK environment variable contains an expired pre-signed URL. " "Please refresh your token by generating a new pre-signed URL with AWS credentials. " "Refer to AWS Bedrock documentation for details on OpenAI-compatible endpoints." ) from e diff --git a/src/llama_stack/providers/remote/inference/bedrock/config.py b/src/llama_stack/providers/remote/inference/bedrock/config.py index 631a6e7ef..f31db63aa 100644 --- a/src/llama_stack/providers/remote/inference/bedrock/config.py +++ b/src/llama_stack/providers/remote/inference/bedrock/config.py @@ -12,9 +12,9 @@ from llama_stack.providers.utils.inference.model_registry import RemoteInference class BedrockProviderDataValidator(BaseModel): - aws_bedrock_api_key: str | None = Field( + aws_bearer_token_bedrock: str | None = Field( default=None, - description="API key for Amazon Bedrock", + description="API Key (Bearer token) for Amazon Bedrock", ) @@ -27,6 +27,6 @@ class BedrockConfig(RemoteInferenceProviderConfig): @classmethod def sample_run_config(cls, **kwargs): return { - "api_key": "${env.AWS_BEDROCK_API_KEY:=}", + "api_key": "${env.AWS_BEARER_TOKEN_BEDROCK:=}", "region_name": "${env.AWS_DEFAULT_REGION:=us-east-2}", } diff --git a/tests/unit/providers/inference/test_bedrock_adapter.py 
b/tests/unit/providers/inference/test_bedrock_adapter.py index a20f2860a..2a1ca769b 100644 --- a/tests/unit/providers/inference/test_bedrock_adapter.py +++ b/tests/unit/providers/inference/test_bedrock_adapter.py @@ -40,8 +40,8 @@ def test_api_key_from_header_overrides_config(): """Test API key from request header overrides config via client property""" config = BedrockConfig(api_key="config-key", region_name="us-east-1") adapter = BedrockInferenceAdapter(config=config) - adapter.provider_data_api_key_field = "aws_bedrock_api_key" - adapter.get_request_provider_data = MagicMock(return_value=SimpleNamespace(aws_bedrock_api_key="header-key")) + adapter.provider_data_api_key_field = "aws_bearer_token_bedrock" + adapter.get_request_provider_data = MagicMock(return_value=SimpleNamespace(aws_bearer_token_bedrock="header-key")) # The client property is where header override happens (in OpenAIMixin) assert adapter.client.api_key == "header-key" diff --git a/tests/unit/providers/inference/test_bedrock_config.py b/tests/unit/providers/inference/test_bedrock_config.py index 4c1fd56a2..622080426 100644 --- a/tests/unit/providers/inference/test_bedrock_config.py +++ b/tests/unit/providers/inference/test_bedrock_config.py @@ -9,7 +9,7 @@ from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig def test_bedrock_config_defaults_no_env(monkeypatch): """Test BedrockConfig defaults when env vars are not set""" - monkeypatch.delenv("AWS_BEDROCK_API_KEY", raising=False) + monkeypatch.delenv("AWS_BEARER_TOKEN_BEDROCK", raising=False) monkeypatch.delenv("AWS_DEFAULT_REGION", raising=False) config = BedrockConfig() assert config.auth_credential is None @@ -35,5 +35,5 @@ def test_bedrock_config_sample(): sample = BedrockConfig.sample_run_config() assert "api_key" in sample assert "region_name" in sample - assert sample["api_key"] == "${env.AWS_BEDROCK_API_KEY:=}" + assert sample["api_key"] == "${env.AWS_BEARER_TOKEN_BEDROCK:=}" assert sample["region_name"] == 
"${env.AWS_DEFAULT_REGION:=us-east-2}"