Merge branch 'main' into alexey-doc-links-update

Alexey Rybak 2025-09-16 11:09:25 -07:00 committed by GitHub
commit 3bccc57f34
46 changed files with 13855 additions and 472 deletions

View file

@ -13,11 +13,8 @@ on:
branches: [ main ]
types: [opened, synchronize, reopened]
paths:
- 'llama_stack/**'
- '!llama_stack/ui/**'
- 'tests/**'
- 'uv.lock'
- 'pyproject.toml'
- 'docs/_static/llama-stack-spec.yaml'
- 'docs/_static/llama-stack-spec.html'
- '.github/workflows/conformance.yml' # This workflow itself
concurrency:
@ -43,10 +40,27 @@ jobs:
ref: ${{ github.event.pull_request.base.ref }}
path: 'base'
# Cache oasdiff to avoid checksum failures and speed up builds
- name: Cache oasdiff
id: cache-oasdiff
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/oasdiff
key: oasdiff-${{ runner.os }}
# Install oasdiff: https://github.com/oasdiff/oasdiff, a tool for detecting breaking changes in OpenAPI specs.
- name: Install oasdiff
if: steps.cache-oasdiff.outputs.cache-hit != 'true'
run: |
curl -fsSL https://raw.githubusercontent.com/oasdiff/oasdiff/main/install.sh | sh
cp /usr/local/bin/oasdiff ~/oasdiff
# Setup cached oasdiff
- name: Setup cached oasdiff
if: steps.cache-oasdiff.outputs.cache-hit == 'true'
run: |
sudo cp ~/oasdiff /usr/local/bin/oasdiff
sudo chmod +x /usr/local/bin/oasdiff
# Run oasdiff to detect breaking changes in the API specification
# This step will fail if incompatible changes are detected, preventing breaking changes from being merged

View file

@ -47,11 +47,21 @@ jobs:
run: npm ci
working-directory: llama_stack/ui
- uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
- name: Run pre-commit
id: precommit
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
continue-on-error: true
env:
SKIP: no-commit-to-branch
RUFF_OUTPUT_FORMAT: github
- name: Check pre-commit results
if: steps.precommit.outcome == 'failure'
run: |
echo "::error::Pre-commit hooks failed. Please run 'pre-commit run --all-files' locally and commit the fixes."
echo "::warning::Some pre-commit hooks failed. Check the output above for details."
exit 1
- name: Debug
run: |
echo "github.ref: ${{ github.ref }}"
@ -79,17 +89,23 @@ jobs:
echo "No changes to commit"
fi
- name: Verify if there are any diff files after pre-commit
- name: Verify no uncommitted changes
if: github.actor != 'dependabot[bot]'
run: |
git diff --exit-code || (echo "There are uncommitted changes, run pre-commit locally and commit again" && exit 1)
if ! git diff --exit-code; then
echo "::error::There are uncommitted changes after pre-commit. Please run 'pre-commit run --all-files' locally and commit the fixes."
echo "::warning::Files with changes:"
git diff --name-status
exit 1
fi
- name: Verify if there are any new files after pre-commit
if: github.actor != 'dependabot[bot]'
run: |
unstaged_files=$(git ls-files --others --exclude-standard)
if [ -n "$unstaged_files" ]; then
echo "There are uncommitted new files, run pre-commit locally and commit again"
echo "::error::There are new untracked files after pre-commit. Please run 'pre-commit run --all-files' locally and commit the fixes."
echo "::warning::New files:"
echo "$unstaged_files"
exit 1
fi

View file

@ -1380,6 +1380,40 @@
}
}
]
},
"delete": {
"responses": {
"200": {
"description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
},
"429": {
"$ref": "#/components/responses/TooManyRequests429"
},
"500": {
"$ref": "#/components/responses/InternalServerError500"
},
"default": {
"$ref": "#/components/responses/DefaultError"
}
},
"tags": [
"Benchmarks"
],
"description": "Unregister a benchmark.",
"parameters": [
{
"name": "benchmark_id",
"in": "path",
"description": "The ID of the benchmark to unregister.",
"required": true,
"schema": {
"type": "string"
}
}
]
}
},
"/v1/openai/v1/chat/completions/{completion_id}": {
@ -1620,6 +1654,40 @@
}
}
]
},
"delete": {
"responses": {
"200": {
"description": "OK"
},
"400": {
"$ref": "#/components/responses/BadRequest400"
},
"429": {
"$ref": "#/components/responses/TooManyRequests429"
},
"500": {
"$ref": "#/components/responses/InternalServerError500"
},
"default": {
"$ref": "#/components/responses/DefaultError"
}
},
"tags": [
"ScoringFunctions"
],
"description": "Unregister a scoring function.",
"parameters": [
{
"name": "scoring_fn_id",
"in": "path",
"description": "The ID of the scoring function to unregister.",
"required": true,
"schema": {
"type": "string"
}
}
]
}
},
"/v1/shields/{identifier}": {

View file

@ -954,6 +954,30 @@ paths:
required: true
schema:
type: string
delete:
responses:
'200':
description: OK
'400':
$ref: '#/components/responses/BadRequest400'
'429':
$ref: >-
#/components/responses/TooManyRequests429
'500':
$ref: >-
#/components/responses/InternalServerError500
default:
$ref: '#/components/responses/DefaultError'
tags:
- Benchmarks
description: Unregister a benchmark.
parameters:
- name: benchmark_id
in: path
description: The ID of the benchmark to unregister.
required: true
schema:
type: string
/v1/openai/v1/chat/completions/{completion_id}:
get:
responses:
@ -1119,6 +1143,31 @@ paths:
required: true
schema:
type: string
delete:
responses:
'200':
description: OK
'400':
$ref: '#/components/responses/BadRequest400'
'429':
$ref: >-
#/components/responses/TooManyRequests429
'500':
$ref: >-
#/components/responses/InternalServerError500
default:
$ref: '#/components/responses/DefaultError'
tags:
- ScoringFunctions
description: Unregister a scoring function.
parameters:
- name: scoring_fn_id
in: path
description: >-
The ID of the scoring function to unregister.
required: true
schema:
type: string
/v1/shields/{identifier}:
get:
responses:

View file

@ -0,0 +1,701 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1ztegmwm4sp",
"metadata": {},
"source": [
"## LlamaStack + LangChain Integration Tutorial\n",
"\n",
"This notebook demonstrates how to integrate **LlamaStack** with **LangChain** to build a complete RAG (Retrieval-Augmented Generation) system.\n",
"\n",
"### Overview\n",
"\n",
"- **LlamaStack**: Provides the infrastructure for running LLMs and Open AI Compatible Vector Stores\n",
"- **LangChain**: Provides the framework for chaining operations and prompt templates\n",
"- **Integration**: Uses LlamaStack's OpenAI-compatible API with LangChain\n",
"\n",
"### What You'll See\n",
"\n",
"1. Setting up LlamaStack server with Fireworks AI provider\n",
"2. Creating and Querying Vector Stores\n",
"3. Building RAG chains with LangChain + LLAMAStack\n",
"4. Querying the chain for relevant information\n",
"\n",
"### Prerequisites\n",
"\n",
"- Fireworks API key\n",
"\n",
"---\n",
"\n",
"### 1. Installation and Setup"
]
},
{
"cell_type": "markdown",
"id": "2ktr5ls2cas",
"metadata": {},
"source": [
"#### Install Required Dependencies\n",
"\n",
"First, we install all the necessary packages for LangChain and FastAPI integration."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "5b6a6a17-b931-4bea-8273-0d6e5563637a",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: uv in /Users/swapna942/miniconda3/lib/python3.12/site-packages (0.7.20)\n",
"\u001b[2mUsing Python 3.12.11 environment at: /Users/swapna942/miniconda3\u001b[0m\n",
"\u001b[2mAudited \u001b[1m7 packages\u001b[0m \u001b[2min 42ms\u001b[0m\u001b[0m\n"
]
}
],
"source": [
"!pip install uv\n",
"!uv pip install fastapi uvicorn \"langchain>=0.2\" langchain-openai \\\n",
" langchain-community langchain-text-splitters \\\n",
" faiss-cpu"
]
},
{
"cell_type": "markdown",
"id": "wmt9jvqzh7n",
"metadata": {},
"source": [
"### 2. LlamaStack Server Setup\n",
"\n",
"#### Build and Start LlamaStack Server\n",
"\n",
"This section sets up the LlamaStack server with:\n",
"- **Fireworks AI** as the inference provider\n",
"- **Sentence Transformers** for embeddings\n",
"\n",
"The server runs on `localhost:8321` and provides OpenAI-compatible endpoints."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "dd2dacf3-ec8b-4cc7-8ff4-b5b6ea4a6e9e",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"import os\n",
"import subprocess\n",
"import time\n",
"\n",
"# Remove UV_SYSTEM_PYTHON to ensure uv creates a proper virtual environment\n",
"# instead of trying to use system Python globally, which could cause permission issues\n",
"# and package conflicts with the system's Python installation\n",
"if \"UV_SYSTEM_PYTHON\" in os.environ:\n",
" del os.environ[\"UV_SYSTEM_PYTHON\"]\n",
"\n",
"def run_llama_stack_server_background():\n",
" \"\"\"Build and run LlamaStack server in one step using --run flag\"\"\"\n",
" log_file = open(\"llama_stack_server.log\", \"w\")\n",
" process = subprocess.Popen(\n",
" \"uv run --with llama-stack llama stack build --distro starter --image-type venv --run\",\n",
" shell=True,\n",
" stdout=log_file,\n",
" stderr=log_file,\n",
" text=True,\n",
" )\n",
"\n",
" print(f\"Building and starting Llama Stack server with PID: {process.pid}\")\n",
" return process\n",
"\n",
"\n",
"def wait_for_server_to_start():\n",
" import requests\n",
" from requests.exceptions import ConnectionError\n",
"\n",
" url = \"http://0.0.0.0:8321/v1/health\"\n",
" max_retries = 30\n",
" retry_interval = 1\n",
"\n",
" print(\"Waiting for server to start\", end=\"\")\n",
" for _ in range(max_retries):\n",
" try:\n",
" response = requests.get(url)\n",
" if response.status_code == 200:\n",
" print(\"\\nServer is ready!\")\n",
" return True\n",
" except ConnectionError:\n",
" print(\".\", end=\"\", flush=True)\n",
" time.sleep(retry_interval)\n",
"\n",
" print(\"\\nServer failed to start after\", max_retries * retry_interval, \"seconds\")\n",
" return False\n",
"\n",
"\n",
"def kill_llama_stack_server():\n",
" # Kill any existing llama stack server processes using pkill command\n",
" os.system(\"pkill -f llama_stack.core.server.server\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "28bd8dbd-4576-4e76-813f-21ab94db44a2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Building and starting Llama Stack server with PID: 19747\n",
"Waiting for server to start....\n",
"Server is ready!\n"
]
}
],
"source": [
"server_process = run_llama_stack_server_background()\n",
"assert wait_for_server_to_start()"
]
},
{
"cell_type": "markdown",
"id": "gr9cdcg4r7n",
"metadata": {},
"source": [
"#### Install LlamaStack Client\n",
"\n",
"Install the client library to interact with the LlamaStack server."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "487d2dbc-d071-400e-b4f0-dcee58f8dc95",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2mUsing Python 3.12.11 environment at: /Users/swapna942/miniconda3\u001b[0m\n",
"\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 27ms\u001b[0m\u001b[0m\n"
]
}
],
"source": [
"!uv pip install llama_stack_client"
]
},
{
"cell_type": "markdown",
"id": "0j5hag7l9x89",
"metadata": {},
"source": [
"### 3. Initialize LlamaStack Client\n",
"\n",
"Create a client connection to the LlamaStack server with API keys for different providers:\n",
"\n",
"- **Fireworks API Key**: For Fireworks models\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "ab4eff97-4565-4c73-b1b3-0020a4c7e2a5",
"metadata": {},
"outputs": [],
"source": [
"from llama_stack_client import LlamaStackClient\n",
"\n",
"client = LlamaStackClient(\n",
" base_url=\"http://0.0.0.0:8321\",\n",
" provider_data={\"fireworks_api_key\": \"***\"},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "vwhexjy1e8o",
"metadata": {},
"source": [
"#### Explore Available Models and Safety Features\n",
"\n",
"Check what models and safety shields are available through your LlamaStack instance."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "880443ef-ac3c-48b1-a80a-7dab5b25ac61",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: GET http://0.0.0.0:8321/v1/models \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: GET http://0.0.0.0:8321/v1/shields \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Available Fireworks models:\n",
"- fireworks/accounts/fireworks/models/llama-v3p1-8b-instruct\n",
"- fireworks/accounts/fireworks/models/llama-v3p1-70b-instruct\n",
"- fireworks/accounts/fireworks/models/llama-v3p1-405b-instruct\n",
"- fireworks/accounts/fireworks/models/llama-v3p2-3b-instruct\n",
"- fireworks/accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n",
"- fireworks/accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n",
"- fireworks/accounts/fireworks/models/llama-v3p3-70b-instruct\n",
"- fireworks/accounts/fireworks/models/llama4-scout-instruct-basic\n",
"- fireworks/accounts/fireworks/models/llama4-maverick-instruct-basic\n",
"- fireworks/nomic-ai/nomic-embed-text-v1.5\n",
"- fireworks/accounts/fireworks/models/llama-guard-3-8b\n",
"- fireworks/accounts/fireworks/models/llama-guard-3-11b-vision\n",
"----\n",
"Available shields (safety models):\n",
"code-scanner\n",
"llama-guard\n",
"nemo-guardrail\n",
"----\n"
]
}
],
"source": [
"print(\"Available Fireworks models:\")\n",
"for m in client.models.list():\n",
" if m.identifier.startswith(\"fireworks/\"):\n",
" print(f\"- {m.identifier}\")\n",
"\n",
"print(\"----\")\n",
"print(\"Available shields (safety models):\")\n",
"for s in client.shields.list():\n",
" print(s.identifier)\n",
"print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "gojp7at31ht",
"metadata": {},
"source": [
"### 4. Vector Store Setup\n",
"\n",
"#### Create a Vector Store with File Upload\n",
"\n",
"Create a vector store using the OpenAI-compatible vector stores API:\n",
"\n",
"- **Vector Store**: OpenAI-compatible vector store for document storage\n",
"- **File Upload**: Automatic chunking and embedding of uploaded files \n",
"- **Embedding Model**: Sentence Transformers model for text embeddings\n",
"- **Dimensions**: 384-dimensional embeddings"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "be2c2899-ea53-4e5f-b6b8-ed425f5d6572",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/files \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/files \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/files \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"File(id='file-54652c95c56c4c34918a97d7ff8a4320', bytes=41, created_at=1757442621, expires_at=1788978621, filename='shipping_policy.txt', object='file', purpose='assistants')\n",
"File(id='file-fb1227c1d1854da1bd774d21e5b7e41c', bytes=48, created_at=1757442621, expires_at=1788978621, filename='returns_policy.txt', object='file', purpose='assistants')\n",
"File(id='file-673f874852fe42798675a13d06a256e2', bytes=45, created_at=1757442621, expires_at=1788978621, filename='support.txt', object='file', purpose='assistants')\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/vector_stores \"HTTP/1.1 200 OK\"\n"
]
}
],
"source": [
"from io import BytesIO\n",
"\n",
"docs = [\n",
" (\"Acme ships globally in 3-5 business days.\", {\"title\": \"Shipping Policy\"}),\n",
" (\"Returns are accepted within 30 days of purchase.\", {\"title\": \"Returns Policy\"}),\n",
" (\"Support is available 24/7 via chat and email.\", {\"title\": \"Support\"}),\n",
"]\n",
"\n",
"file_ids = []\n",
"for content, metadata in docs:\n",
" with BytesIO(content.encode()) as file_buffer:\n",
" file_buffer.name = f\"{metadata['title'].replace(' ', '_').lower()}.txt\"\n",
" create_file_response = client.files.create(file=file_buffer, purpose=\"assistants\")\n",
" print(create_file_response)\n",
" file_ids.append(create_file_response.id)\n",
"\n",
"# Create vector store with files\n",
"vector_store = client.vector_stores.create(\n",
" name=\"acme_docs\",\n",
" file_ids=file_ids,\n",
" embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\",\n",
" embedding_dimension=384,\n",
" provider_id=\"faiss\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9061tmi1zpq",
"metadata": {},
"source": [
"#### Test Vector Store Search\n",
"\n",
"Query the vector store. This performs semantic search to find relevant documents based on the query."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ba9d1901-bd5e-4216-b3e6-19dc74551cc6",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/vector_stores/vs_708c060b-45da-423e-8354-68529b4fd1a6/search \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Acme ships globally in 3-5 business days.\n",
"Returns are accepted within 30 days of purchase.\n"
]
}
],
"source": [
"search_response = client.vector_stores.search(\n",
" vector_store_id=vector_store.id,\n",
" query=\"How long does shipping take?\",\n",
" max_num_results=2\n",
")\n",
"for result in search_response.data:\n",
" content = result.content[0].text\n",
" print(content)"
]
},
{
"cell_type": "markdown",
"id": "usne6mbspms",
"metadata": {},
"source": [
"### 5. LangChain Integration\n",
"\n",
"#### Configure LangChain with LlamaStack\n",
"\n",
"Set up LangChain to use LlamaStack's OpenAI-compatible API:\n",
"\n",
"- **Base URL**: Points to LlamaStack's OpenAI endpoint\n",
"- **Headers**: Include Fireworks API key for model access\n",
"- **Model**: Use Meta Llama v3p1 8b instruct model for inference"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "c378bd10-09c2-417c-bdfc-1e0a2dd19084",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Point LangChain to Llamastack Server\n",
"llm = ChatOpenAI(\n",
" base_url=\"http://0.0.0.0:8321/v1/openai/v1\",\n",
" api_key=\"dummy\",\n",
" model=\"fireworks/accounts/fireworks/models/llama-v3p1-8b-instruct\",\n",
" default_headers={\"X-LlamaStack-Provider-Data\": '{\"fireworks_api_key\": \"***\"}'},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5a4ddpcuk3l",
"metadata": {},
"source": [
"#### Test LLM Connection\n",
"\n",
"Verify that LangChain can successfully communicate with the LlamaStack server."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "f88ffb5a-657b-4916-9375-c6ddc156c25e",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"A llama's gentle eyes shine bright,\\nIn the Andes, it roams through morning light.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': None, 'model_name': 'fireworks/accounts/fireworks/models/llama-v3p1-8b-instruct', 'system_fingerprint': None, 'id': 'chatcmpl-602b5967-82a3-476b-9cd2-7d3b29b76ee8', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--0933c465-ff4d-4a7b-b7fb-fd97dd8244f3-0')"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Test llm with simple message\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a friendly assistant.\"},\n",
" {\"role\": \"user\", \"content\": \"Write a two-sentence poem about llama.\"},\n",
"]\n",
"llm.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "0xh0jg6a0l4a",
"metadata": {},
"source": [
"### 6. Building the RAG Chain\n",
"\n",
"#### Create a Complete RAG Pipeline\n",
"\n",
"Build a LangChain pipeline that combines:\n",
"\n",
"1. **Vector Search**: Query LlamaStack's Open AI compatible Vector Store\n",
"2. **Context Assembly**: Format retrieved documents\n",
"3. **Prompt Template**: Structure the input for the LLM\n",
"4. **LLM Generation**: Generate answers using context\n",
"5. **Output Parsing**: Extract the final response\n",
"\n",
"**Chain Flow**: `Query → Vector Search → Context + Question → LLM → Response`"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "9684427d-dcc7-4544-9af5-8b110d014c42",
"metadata": {},
"outputs": [],
"source": [
"# LangChain for prompt template and chaining + LLAMA Stack Client Vector DB and LLM chat completion\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"\n",
"\n",
"def join_docs(docs):\n",
" return \"\\n\\n\".join([f\"[{d.filename}] {d.content[0].text}\" for d in docs.data])\n",
"\n",
"PROMPT = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant. Use the following context to answer.\"),\n",
" (\"user\", \"Question: {question}\\n\\nContext:\\n{context}\"),\n",
" ]\n",
")\n",
"\n",
"vector_step = RunnableLambda(\n",
" lambda x: client.vector_stores.search(\n",
" vector_store_id=vector_store.id,\n",
" query=x,\n",
" max_num_results=2\n",
" )\n",
" )\n",
"\n",
"chain = (\n",
" {\"context\": vector_step | RunnableLambda(join_docs), \"question\": RunnablePassthrough()}\n",
" | PROMPT\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0onu6rhphlra",
"metadata": {},
"source": [
"### 7. Testing the RAG System\n",
"\n",
"#### Example 1: Shipping Query"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "03322188-9509-446a-a4a8-ce3bb83ec87c",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/vector_stores/vs_708c060b-45da-423e-8354-68529b4fd1a6/search \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"❓ How long does shipping take?\n",
"💡 Acme ships globally in 3-5 business days. This means that shipping typically takes between 3 to 5 working days from the date of dispatch or order fulfillment.\n"
]
}
],
"source": [
"query = \"How long does shipping take?\"\n",
"response = chain.invoke(query)\n",
"print(\"❓\", query)\n",
"print(\"💡\", response)"
]
},
{
"cell_type": "markdown",
"id": "b7krhqj88ku",
"metadata": {},
"source": [
"#### Example 2: Returns Policy Query"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "61995550-bb0b-46a8-a5d0-023207475d60",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/vector_stores/vs_708c060b-45da-423e-8354-68529b4fd1a6/search \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://0.0.0.0:8321/v1/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"❓ Can I return a product after 40 days?\n",
"💡 Based on the provided context, you cannot return a product after 40 days. The return window is limited to 30 days from the date of purchase.\n"
]
}
],
"source": [
"query = \"Can I return a product after 40 days?\"\n",
"response = chain.invoke(query)\n",
"print(\"❓\", query)\n",
"print(\"💡\", response)"
]
},
{
"cell_type": "markdown",
"id": "h4w24fadvjs",
"metadata": {},
"source": [
"---\n",
"We have successfully built a RAG system that combines:\n",
"\n",
"- **LlamaStack** for infrastructure (LLM serving + Vector Store)\n",
"- **LangChain** for orchestration (prompts + chains)\n",
"- **Fireworks** for high-quality language models\n",
"\n",
"### Key Benefits\n",
"\n",
"1. **Unified Infrastructure**: Single server for LLMs and Vector Store\n",
"2. **OpenAI Compatibility**: Easy integration with existing LangChain code\n",
"3. **Multi-Provider Support**: Switch between different LLM providers\n",
"4. **Production Ready**: Built-in safety shields and monitoring\n",
"\n",
"### Next Steps\n",
"\n",
"- Add more sophisticated document processing\n",
"- Implement conversation memory\n",
"- Add safety filtering and monitoring\n",
"- Scale to larger document collections\n",
"- Integrate with web frameworks like FastAPI or Streamlit\n",
"\n",
"---\n",
"\n",
"##### 🔧 Cleanup\n",
"\n",
"Don't forget to stop the LlamaStack server when you're done:\n",
"\n",
"```python\n",
"kill_llama_stack_server()\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "15647c46-22ce-4698-af3f-8161329d8e3a",
"metadata": {},
"outputs": [],
"source": [
"kill_llama_stack_server()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View file

@ -0,0 +1,94 @@
# Llama Stack API Stability Leveling
In order to provide a stable experience in Llama Stack, the various APIs need different stability levels indicating the level of support, backwards compatibility, and overall production readiness.
## Different Levels
### v1alpha
- Little to no expectation of support between versions
- Breaking changes are permitted
- Datatypes and parameters can break
- Routes can be added and removed
#### Graduation Criteria
- an API can graduate from `v1alpha` to `v1beta` once the team has identified the extent of the non-optional routes and the shape of their parameters/return types for the API, e.g. `/v1/openai/chat/completions`. Optional types can change.
- CRUD must stay stable once in `v1beta`. This is a commitment to backward compatibility, guaranteeing that most code you write against the v1beta version will not break during future updates. We may make additive changes (like adding a new, optional field to a response), but we will not make breaking changes (like renaming an existing `modelName` field to `name`, changing an ID's data type from an integer to a string, or altering an endpoint URL); see the sketch after this list.
- for OpenAI APIs, a comparison to the OpenAI spec for the specific API can be done to ensure completeness.
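To make that commitment concrete, here is a minimal sketch (using pydantic, which Llama Stack uses for its datatypes; the type names are hypothetical) of an additive change versus a breaking one:
```python
from pydantic import BaseModel

# v1beta shape as shipped (hypothetical datatype)
class ModelInfo(BaseModel):
    modelName: str
    id: int

# Allowed after v1beta: an additive, optional field; existing clients keep working.
class ModelInfoAdditive(BaseModel):
    modelName: str
    id: int
    description: str | None = None  # new and optional

# Not allowed after v1beta: renaming a field and changing an ID's data type.
class ModelInfoBreaking(BaseModel):
    name: str  # was "modelName"
    id: str    # was int
```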
### v1beta
- API routes remain consistent between versions
- Parameters and return types are not ensured between versions
- API, besides minor fixes and adjustments, should be _almost_ v1. Changes should not be drastic.
#### Graduation Criteria
- an API can graduate from `v1beta` to `v1` if the API surface and datatypes are complete as identified by the team. The parameters and return types that are mandatory for each route are stable. All aspects of graduating from `v1alpha` to `v1beta` apply as well.
- Optional parameters, routes, or parts of the return type can be added after graduating to `v1`
### v1 (stable)
- Considered stable
- Backwards compatible between Z-streams
- Y-stream breaking changes must go through the proper approval and announcement process.
- Datatypes for a route and its return types cannot change between Z-streams
- Y-stream datatype changes should be sparing, unless the changes are additional net-new parameters
- Must have proper conformance testing as outlined in https://github.com/llamastack/llama-stack/issues/3237
### v2+ (Major Versions)
Introducing a new major version like `/v2` is a significant and disruptive event that should be treated as a last resort. It is reserved for essential changes to a stable `/v1` API that are fundamentally backward-incompatible and cannot be implemented through additive, non-breaking changes or breaking changes across X/Y-Stream releases (x.y.z).
If a `/v2` version is deemed absolutely necessary, it must adhere to the following protocol to ensure a sane and predictable transition for users:
#### Lifecycle Progression
A new major version must follow the same stability lifecycle as `/v1`. It will be introduced as `/v2alpha`, mature to `/v2beta`, and finally become stable as `/v2`.
#### Coexistence
The new `/v2` API must be introduced alongside the existing `/v1` API and run in parallel. It must not replace the `/v1` API immediately.
#### Deprecation Policy
When a `/v2` API is introduced, a clear and generous deprecation policy for the `/v1` API must be published simultaneously. This policy must outline the timeline for the eventual removal of the `/v1` API, giving users ample time to migrate.
### API Stability vs. Provider Stability
The leveling introduced in this document relates to the stability of the API and not specifically the providers within the API.
Providers can iterate as much as they want on functionality as long as they work within the bounds of an API. If they need to change the API, then the API should not be `/v1`, or those breaking changes can only happen on a Y-stream release basis.
### Approval and Announcement Process for Breaking Changes
- **PR Labeling**: Any pull request that introduces a breaking API change must be clearly labeled with `breaking-change`.
- **PR Title/Commit**: Any pull request that introduces a breaking API change must contain `BREAKING CHANGE` in the title and commit footer. Alternatively, the commit can include `!`, e.g. `feat(api)!: title goes here`, as outlined in the [conventional commits documentation](https://www.conventionalcommits.org/en/v1.0.0/#specification); see the example after this list.
- **Maintainer Review**: At least one maintainer must explicitly acknowledge the breaking change during review by applying the `breaking-change` label. An approval must come with this label or the acknowledgement this label has already been applied.
- **Announcement**: Breaking changes require inclusion in release notes and, if applicable, a separate communication (e.g., Discord, GitHub Issues, or GitHub Discussions) prior to release.
If a PR has proper approvals, labels, and commit/title hygiene, the failing API conformance tests will be bypassed.
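For example, a hypothetical commit moving `post_training` routes out of `/v1` would carry the marker in both the title and the footer:
```
feat(api)!: move post_training routes under /v1alpha

BREAKING CHANGE: post_training endpoints are now served under /v1alpha instead of /v1.
```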
## Enforcement
### Migration of API routes under `/v1alpha`, `/v1beta`, and `/v1`
Instead of placing every API under `/v1`, any API that is not fully stable or complete should go under `/v1alpha` or `/v1beta`. For example, at the time of this writing, `post_training` belongs here, as well as any OpenAI-compatible API whose surface does not exactly match the upstream OpenAI API it mimics.
This migration is crucial as we get Llama Stack in the hands of users who intend to productize various APIs. A clear view of what is stable and what is actively being developed will enable users to pick and choose various APIs to build their products on.
This migration will be a breaking change for any API moving out of `/v1`. Ideally, this should happen before 0.3.0 and especially 1.0.0.
### `x-stability` tags in the OpenAPI spec for oasdiff
`x-stability` tags allow tools like oasdiff to enforce different rules for different stability levels; these tags should match the stability level of the routes they annotate (see [oasdiff stability](https://github.com/oasdiff/oasdiff/blob/main/docs/STABILITY.md)).
### Testing
The testing of each stable API is already outlined in [issue #3237](https://github.com/llamastack/llama-stack/issues/3237) and is being worked on. These sorts of conformance tests should apply primarily to `/v1` APIs only, with `/v1alpha` and `/v1beta` having any tests the maintainers see fit as well as basic testing to ensure the routing works properly.
### New APIs going forward
Any new API should be introduced as `/v1alpha`.

View file

@ -9,8 +9,8 @@ IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `url` | `<class 'str'>` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | The watsonx API key, only needed of using the hosted service |
| `project_id` | `str \| None` | No | | The Project ID key, only needed of using the hosted service |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | The watsonx API key |
| `project_id` | `str \| None` | No | | The Project ID key |
| `timeout` | `<class 'int'>` | No | 60 | Timeout for the HTTP requests |
## Sample Configuration

View file

@ -93,3 +93,11 @@ class Benchmarks(Protocol):
:param metadata: The metadata to use for the benchmark.
"""
...
@webmethod(route="/eval/benchmarks/{benchmark_id}", method="DELETE")
async def unregister_benchmark(self, benchmark_id: str) -> None:
"""Unregister a benchmark.
:param benchmark_id: The ID of the benchmark to unregister.
"""
...
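# Illustrative sketch (not part of this file): exercising the new DELETE route
# over plain HTTP, assuming a local server at localhost:8321 and the standard
# /v1 prefix used elsewhere in the spec; the benchmark ID is hypothetical.
import requests

resp = requests.delete("http://localhost:8321/v1/eval/benchmarks/my-benchmark")
resp.raise_for_status()  # a 200 response means the benchmark was unregistered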

View file

@ -197,3 +197,11 @@ class ScoringFunctions(Protocol):
:param params: The parameters for the scoring function for benchmark eval, these can be overridden for app eval.
"""
...
@webmethod(route="/scoring-functions/{scoring_fn_id:path}", method="DELETE")
async def unregister_scoring_function(self, scoring_fn_id: str) -> None:
"""Unregister a scoring function.
:param scoring_fn_id: The ID of the scoring function to unregister.
"""
...

View file

@ -48,15 +48,12 @@ def setup_verify_download_parser(parser: argparse.ArgumentParser) -> None:
parser.set_defaults(func=partial(run_verify_cmd, parser=parser))
def calculate_md5(filepath: Path, chunk_size: int = 8192) -> str:
# NOTE: MD5 is used here only for download integrity verification,
# not for security purposes
# TODO: switch to SHA256
md5_hash = hashlib.md5(usedforsecurity=False)
def calculate_sha256(filepath: Path, chunk_size: int = 8192) -> str:
sha256_hash = hashlib.sha256()
with open(filepath, "rb") as f:
for chunk in iter(lambda: f.read(chunk_size), b""):
md5_hash.update(chunk)
return md5_hash.hexdigest()
sha256_hash.update(chunk)
return sha256_hash.hexdigest()
def load_checksums(checklist_path: Path) -> dict[str, str]:
@ -64,10 +61,10 @@ def load_checksums(checklist_path: Path) -> dict[str, str]:
with open(checklist_path) as f:
for line in f:
if line.strip():
md5sum, filepath = line.strip().split(" ", 1)
sha256sum, filepath = line.strip().split(" ", 1)
# Remove leading './' if present
filepath = filepath.lstrip("./")
checksums[filepath] = md5sum
checksums[filepath] = sha256sum
return checksums
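# Illustrative sketch (not part of this file): recomputing digests and comparing
# them to the checklist entries, using the two helpers above. The checkpoint
# directory and checklist filename are assumptions.
model_dir = Path("~/.llama/checkpoints/my-model").expanduser()
checksums = load_checksums(model_dir / "checklist.chk")
for rel_path, expected_hash in checksums.items():
    actual_hash = calculate_sha256(model_dir / rel_path)
    print(rel_path, "OK" if actual_hash == expected_hash else "MISMATCH")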
@ -88,7 +85,7 @@ def verify_files(model_dir: Path, checksums: dict[str, str], console: Console) -
matches = False
if exists:
actual_hash = calculate_md5(full_path)
actual_hash = calculate_sha256(full_path)
matches = actual_hash == expected_hash
results.append(

View file

@ -56,3 +56,7 @@ class BenchmarksRoutingTable(CommonRoutingTableImpl, Benchmarks):
provider_resource_id=provider_benchmark_id,
)
await self.register_object(benchmark)
async def unregister_benchmark(self, benchmark_id: str) -> None:
existing_benchmark = await self.get_benchmark(benchmark_id)
await self.unregister_object(existing_benchmark)

View file

@ -64,6 +64,10 @@ async def unregister_object_from_provider(obj: RoutableObject, p: Any) -> None:
return await p.unregister_shield(obj.identifier)
elif api == Api.datasetio:
return await p.unregister_dataset(obj.identifier)
elif api == Api.eval:
return await p.unregister_benchmark(obj.identifier)
elif api == Api.scoring:
return await p.unregister_scoring_function(obj.identifier)
elif api == Api.tool_runtime:
return await p.unregister_toolgroup(obj.identifier)
else:

View file

@ -60,3 +60,7 @@ class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, ScoringFunctions):
)
scoring_fn.provider_id = provider_id
await self.register_object(scoring_fn)
async def unregister_scoring_function(self, scoring_fn_id: str) -> None:
existing_scoring_fn = await self.get_scoring_function(scoring_fn_id)
await self.unregister_object(existing_scoring_fn)

View file

@ -10,6 +10,7 @@ apis:
- telemetry
- tool_runtime
- vector_io
- files
providers:
inference:
- provider_id: watsonx
@ -94,6 +95,14 @@ providers:
provider_type: inline::rag-runtime
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
files:
- provider_id: meta-reference-files
provider_type: inline::localfs
config:
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/watsonx/files}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/files_metadata.db
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/registry.db

View file

@ -9,6 +9,7 @@ from pathlib import Path
from llama_stack.apis.models import ModelType
from llama_stack.core.datatypes import BuildProvider, ModelInput, Provider, ToolGroupInput
from llama_stack.distributions.template import DistributionTemplate, RunConfigSettings, get_model_registry
from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig
from llama_stack.providers.inline.inference.sentence_transformers import (
SentenceTransformersInferenceConfig,
)
@ -16,7 +17,7 @@ from llama_stack.providers.remote.inference.watsonx import WatsonXConfig
from llama_stack.providers.remote.inference.watsonx.models import MODEL_ENTRIES
def get_distribution_template() -> DistributionTemplate:
def get_distribution_template(name: str = "watsonx") -> DistributionTemplate:
providers = {
"inference": [
BuildProvider(provider_type="remote::watsonx"),
@ -42,6 +43,7 @@ def get_distribution_template() -> DistributionTemplate:
BuildProvider(provider_type="inline::rag-runtime"),
BuildProvider(provider_type="remote::model-context-protocol"),
],
"files": [BuildProvider(provider_type="inline::localfs")],
}
inference_provider = Provider(
@ -79,9 +81,14 @@ def get_distribution_template() -> DistributionTemplate:
},
)
files_provider = Provider(
provider_id="meta-reference-files",
provider_type="inline::localfs",
config=LocalfsFilesImplConfig.sample_run_config(f"~/.llama/distributions/{name}"),
)
default_models, _ = get_model_registry(available_models)
return DistributionTemplate(
name="watsonx",
name=name,
distro_type="remote_hosted",
description="Use watsonx for running LLM inference",
container_image=None,
@ -92,6 +99,7 @@ def get_distribution_template() -> DistributionTemplate:
"run.yaml": RunConfigSettings(
provider_overrides={
"inference": [inference_provider, embedding_provider],
"files": [files_provider],
},
default_models=default_models + [embedding_model],
default_tool_groups=default_tool_groups,

View file

@ -75,6 +75,13 @@ class MetaReferenceEvalImpl(
)
self.benchmarks[task_def.identifier] = task_def
async def unregister_benchmark(self, benchmark_id: str) -> None:
if benchmark_id in self.benchmarks:
del self.benchmarks[benchmark_id]
key = f"{EVAL_TASKS_PREFIX}{benchmark_id}"
await self.kvstore.delete(key)
async def run_eval(
self,
benchmark_id: str,

View file

@ -63,6 +63,9 @@ class LlmAsJudgeScoringImpl(
async def register_scoring_function(self, function_def: ScoringFn) -> None:
self.llm_as_judge_fn.register_scoring_fn_def(function_def)
async def unregister_scoring_function(self, scoring_fn_id: str) -> None:
self.llm_as_judge_fn.unregister_scoring_fn_def(scoring_fn_id)
async def score_batch(
self,
dataset_id: str,

View file

@ -51,18 +51,23 @@ class NVIDIAEvalImpl(
async def shutdown(self) -> None: ...
async def _evaluator_get(self, path):
async def _evaluator_get(self, path: str):
"""Helper for making GET requests to the evaluator service."""
response = requests.get(url=f"{self.config.evaluator_url}{path}")
response.raise_for_status()
return response.json()
async def _evaluator_post(self, path, data):
async def _evaluator_post(self, path: str, data: dict[str, Any]):
"""Helper for making POST requests to the evaluator service."""
response = requests.post(url=f"{self.config.evaluator_url}{path}", json=data)
response.raise_for_status()
return response.json()
async def _evaluator_delete(self, path: str) -> None:
"""Helper for making DELETE requests to the evaluator service."""
response = requests.delete(url=f"{self.config.evaluator_url}{path}")
response.raise_for_status()
async def register_benchmark(self, task_def: Benchmark) -> None:
"""Register a benchmark as an evaluation configuration."""
await self._evaluator_post(
@ -75,6 +80,10 @@ class NVIDIAEvalImpl(
},
)
async def unregister_benchmark(self, benchmark_id: str) -> None:
"""Unregister a benchmark evaluation configuration from NeMo Evaluator."""
await self._evaluator_delete(f"/v1/evaluation/configs/{DEFAULT_NAMESPACE}/{benchmark_id}")
async def run_eval(
self,
benchmark_id: str,

View file

@ -8,6 +8,7 @@
from collections.abc import AsyncGenerator
from huggingface_hub import AsyncInferenceClient, HfApi
from pydantic import SecretStr
from llama_stack.apis.common.content_types import (
InterleavedContent,
@ -33,6 +34,7 @@ from llama_stack.apis.inference import (
ToolPromptFormat,
)
from llama_stack.apis.models import Model
from llama_stack.apis.models.models import ModelType
from llama_stack.log import get_logger
from llama_stack.models.llama.sku_list import all_registered_models
from llama_stack.providers.datatypes import ModelsProtocolPrivate
@ -41,16 +43,15 @@ from llama_stack.providers.utils.inference.model_registry import (
build_hf_repo_model_entry,
)
from llama_stack.providers.utils.inference.openai_compat import (
OpenAIChatCompletionToLlamaStackMixin,
OpenAICompatCompletionChoice,
OpenAICompatCompletionResponse,
OpenAICompletionToLlamaStackMixin,
get_sampling_options,
process_chat_completion_response,
process_chat_completion_stream_response,
process_completion_response,
process_completion_stream_response,
)
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
from llama_stack.providers.utils.inference.prompt_adapter import (
chat_completion_request_to_model_input_info,
completion_request_to_prompt_model_input_info,
@ -73,26 +74,49 @@ def build_hf_repo_model_entries():
class _HfAdapter(
OpenAIMixin,
Inference,
OpenAIChatCompletionToLlamaStackMixin,
OpenAICompletionToLlamaStackMixin,
ModelsProtocolPrivate,
):
client: AsyncInferenceClient
url: str
api_key: SecretStr
hf_client: AsyncInferenceClient
max_tokens: int
model_id: str
overwrite_completion_id = True # TGI always returns id=""
def __init__(self) -> None:
self.register_helper = ModelRegistryHelper(build_hf_repo_model_entries())
self.huggingface_repo_to_llama_model_id = {
model.huggingface_repo: model.descriptor() for model in all_registered_models() if model.huggingface_repo
}
def get_api_key(self):
return self.api_key.get_secret_value()
def get_base_url(self):
return self.url
async def shutdown(self) -> None:
pass
async def list_models(self) -> list[Model] | None:
models = []
async for model in self.client.models.list():
models.append(
Model(
identifier=model.id,
provider_resource_id=model.id,
provider_id=self.__provider_id__,
metadata={},
model_type=ModelType.llm,
)
)
return models
async def register_model(self, model: Model) -> Model:
model = await self.register_helper.register_model(model)
if model.provider_resource_id != self.model_id:
raise ValueError(
f"Model {model.provider_resource_id} does not match the model {self.model_id} served by TGI."
@ -176,7 +200,7 @@ class _HfAdapter(
params = await self._get_params_for_completion(request)
async def _generate_and_convert_to_openai_compat():
s = await self.client.text_generation(**params)
s = await self.hf_client.text_generation(**params)
async for chunk in s:
token_result = chunk.token
finish_reason = None
@ -194,7 +218,7 @@ class _HfAdapter(
async def _nonstream_completion(self, request: CompletionRequest) -> AsyncGenerator:
params = await self._get_params_for_completion(request)
r = await self.client.text_generation(**params)
r = await self.hf_client.text_generation(**params)
choice = OpenAICompatCompletionChoice(
finish_reason=r.details.finish_reason,
@ -241,7 +265,7 @@ class _HfAdapter(
async def _nonstream_chat_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse:
params = await self._get_params(request)
r = await self.client.text_generation(**params)
r = await self.hf_client.text_generation(**params)
choice = OpenAICompatCompletionChoice(
finish_reason=r.details.finish_reason,
@ -256,7 +280,7 @@ class _HfAdapter(
params = await self._get_params(request)
async def _generate_and_convert_to_openai_compat():
s = await self.client.text_generation(**params)
s = await self.hf_client.text_generation(**params)
async for chunk in s:
token_result = chunk.token
@ -308,18 +332,21 @@ class TGIAdapter(_HfAdapter):
if not config.url:
raise ValueError("You must provide a URL in run.yaml (or via the TGI_URL environment variable) to use TGI.")
log.info(f"Initializing TGI client with url={config.url}")
self.client = AsyncInferenceClient(model=config.url, provider="hf-inference")
endpoint_info = await self.client.get_endpoint_info()
self.hf_client = AsyncInferenceClient(model=config.url, provider="hf-inference")
endpoint_info = await self.hf_client.get_endpoint_info()
self.max_tokens = endpoint_info["max_total_tokens"]
self.model_id = endpoint_info["model_id"]
self.url = f"{config.url.rstrip('/')}/v1"
self.api_key = SecretStr("NO_KEY")
class InferenceAPIAdapter(_HfAdapter):
async def initialize(self, config: InferenceAPIImplConfig) -> None:
self.client = AsyncInferenceClient(model=config.huggingface_repo, token=config.api_token.get_secret_value())
endpoint_info = await self.client.get_endpoint_info()
self.hf_client = AsyncInferenceClient(model=config.huggingface_repo, token=config.api_token.get_secret_value())
endpoint_info = await self.hf_client.get_endpoint_info()
self.max_tokens = endpoint_info["max_total_tokens"]
self.model_id = endpoint_info["model_id"]
# TODO: how do we set url for this?
class InferenceEndpointAdapter(_HfAdapter):
@ -331,6 +358,7 @@ class InferenceEndpointAdapter(_HfAdapter):
endpoint.wait(timeout=60)
# Initialize the adapter
self.client = endpoint.async_client
self.hf_client = endpoint.async_client
self.model_id = endpoint.repository
self.max_tokens = int(endpoint.raw["model"]["image"]["custom"]["env"]["MAX_TOTAL_TOKENS"])
# TODO: how do we set url for this?

View file

@ -26,11 +26,11 @@ class WatsonXConfig(BaseModel):
)
api_key: SecretStr | None = Field(
default_factory=lambda: os.getenv("WATSONX_API_KEY"),
description="The watsonx API key, only needed of using the hosted service",
description="The watsonx API key",
)
project_id: str | None = Field(
default_factory=lambda: os.getenv("WATSONX_PROJECT_ID"),
description="The Project ID key, only needed of using the hosted service",
description="The Project ID key",
)
timeout: int = Field(
default=60,

View file

@ -38,6 +38,7 @@ from llama_stack.apis.inference import (
TopKSamplingStrategy,
TopPSamplingStrategy,
)
from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
from llama_stack.providers.utils.inference.openai_compat import (
OpenAICompatCompletionChoice,
@ -57,14 +58,29 @@ from llama_stack.providers.utils.inference.prompt_adapter import (
from . import WatsonXConfig
from .models import MODEL_ENTRIES
logger = get_logger(name=__name__, category="inference::watsonx")
# Note on structured output
# WatsonX returns responses with a json embedded into a string.
# Examples:
# ChatCompletionResponse(completion_message=CompletionMessage(content='```json\n{\n
# "first_name": "Michael",\n "last_name": "Jordan",\n'...)
# Not even valid JSON, but we can still extract the JSON from the content
# CompletionResponse(content=' \nThe best answer is $\\boxed{\\{"name": "Michael Jordan",
# "year_born": "1963", "year_retired": "2003"\\}}$')
# Find the start of the boxed content
class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
def __init__(self, config: WatsonXConfig) -> None:
ModelRegistryHelper.__init__(self, MODEL_ENTRIES)
print(f"Initializing watsonx InferenceAdapter({config.url})...")
logger.info(f"Initializing watsonx InferenceAdapter({config.url})...")
self._config = config
self._openai_client: AsyncOpenAI | None = None
self._project_id = self._config.project_id

View file

@ -4,6 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import uuid
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator
from typing import Any
@ -43,6 +44,12 @@ class OpenAIMixin(ABC):
The model_store is set in routing_tables/common.py during provider initialization.
"""
# Allow subclasses to control whether the 'id' field in OpenAI responses
# is overwritten with a client-side generated id.
#
# This is useful for providers that do not return a unique id in the response.
overwrite_completion_id: bool = False
@abstractmethod
def get_api_key(self) -> str:
"""
@ -110,6 +117,23 @@ class OpenAIMixin(ABC):
raise ValueError(f"Model {model} has no provider_resource_id")
return model_obj.provider_resource_id
async def _maybe_overwrite_id(self, resp: Any, stream: bool | None) -> Any:
if not self.overwrite_completion_id:
return resp
new_id = f"cltsd-{uuid.uuid4()}"
if stream:
async def _gen():
async for chunk in resp:
chunk.id = new_id
yield chunk
return _gen()
else:
resp.id = new_id
return resp
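# Illustrative sketch (not part of this file): a hypothetical adapter opting in
# to client-side id generation, the same way the TGI adapter does for its
# always-empty response ids. The base URL and key handling are assumptions.
class _ExampleAdapter(OpenAIMixin):
    overwrite_completion_id = True  # upstream returns id="" on every response

    def get_api_key(self) -> str:
        return "NO_KEY"  # assumed: the upstream service requires no key

    def get_base_url(self) -> str:
        return "http://localhost:8000/v1"  # assumed upstream endpoint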
async def openai_completion(
self,
model: str,
@ -147,7 +171,7 @@ class OpenAIMixin(ABC):
extra_body["guided_choice"] = guided_choice
# TODO: fix openai_completion to return type compatible with OpenAI's API response
return await self.client.completions.create( # type: ignore[no-any-return]
resp = await self.client.completions.create(
**await prepare_openai_completion_params(
model=await self._get_provider_model_id(model),
prompt=prompt,
@ -171,6 +195,8 @@ class OpenAIMixin(ABC):
extra_body=extra_body,
)
return await self._maybe_overwrite_id(resp, stream) # type: ignore[no-any-return]
async def openai_chat_completion(
self,
model: str,
@ -200,8 +226,7 @@ class OpenAIMixin(ABC):
"""
Direct OpenAI chat completion API call.
"""
# Type ignore because return types are compatible
return await self.client.chat.completions.create( # type: ignore[no-any-return]
resp = await self.client.chat.completions.create(
**await prepare_openai_completion_params(
model=await self._get_provider_model_id(model),
messages=messages,
@ -229,6 +254,8 @@ class OpenAIMixin(ABC):
)
)
return await self._maybe_overwrite_id(resp, stream) # type: ignore[no-any-return]
async def openai_embeddings(
self,
model: str,

View file

@ -12,14 +12,12 @@ import uuid
def generate_chunk_id(document_id: str, chunk_text: str, chunk_window: str | None = None) -> str:
"""
Generate a unique chunk ID using a hash of the document ID and chunk text.
Note: MD5 is used only to calculate an identifier, not for security purposes.
Adding usedforsecurity=False for compatibility with FIPS environments.
Then use the first 32 characters of the hash to create a UUID.
"""
hash_input = f"{document_id}:{chunk_text}".encode()
if chunk_window:
hash_input += f":{chunk_window}".encode()
return str(uuid.UUID(hashlib.md5(hash_input, usedforsecurity=False).hexdigest()))
return str(uuid.UUID(hashlib.sha256(hash_input).hexdigest()[:32]))
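# Illustrative sketch (not part of this file): the ID is a truncated SHA-256
# digest recast as a UUID, so it is deterministic for a given document/chunk
# pair. Inputs here are hypothetical.
a = generate_chunk_id("doc-1", "Acme ships globally in 3-5 business days.")
b = generate_chunk_id("doc-1", "Acme ships globally in 3-5 business days.")
c = generate_chunk_id("doc-2", "Acme ships globally in 3-5 business days.")
assert a == b  # same document and text -> same chunk id
assert a != c  # different document id -> different chunk id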
def proper_case(s: str) -> str:

View file

@ -15,6 +15,8 @@ from enum import StrEnum
from pathlib import Path
from typing import Any, Literal, cast
from openai import NOT_GIVEN
from llama_stack.log import get_logger
logger = get_logger(__name__, category="testing")
@ -198,20 +200,15 @@ def _model_identifiers_digest(endpoint: str, response: dict[str, Any]) -> str:
Supported endpoints:
- '/api/tags' (Ollama): response body has 'models': [ { name/model/digest/id/... }, ... ]
- '/v1/models' (OpenAI): response body has 'data': [ { id: ... }, ... ]
- '/v1/models' (OpenAI): response body is: [ { id: ... }, ... ]
Returns a list of unique identifiers or None if structure doesn't match.
"""
body = response["body"]
if endpoint == "/api/tags":
items = body.get("models")
idents = [m.model for m in items]
else:
items = body.get("data")
idents = [m.id for m in items]
items = response["body"]
idents = [m.model if endpoint == "/api/tags" else m.id for m in items]
return sorted(set(idents))
identifiers = _extract_model_identifiers()
return hashlib.sha1(("|".join(identifiers)).encode("utf-8")).hexdigest()[:8]
return hashlib.sha256(("|".join(identifiers)).encode("utf-8")).hexdigest()[:8]
def _combine_model_list_responses(endpoint: str, records: list[dict[str, Any]]) -> dict[str, Any] | None:
@ -219,28 +216,22 @@ def _combine_model_list_responses(endpoint: str, records: list[dict[str, Any]])
seen: dict[str, dict[str, Any]] = {}
for rec in records:
body = rec["response"]["body"]
if endpoint == "/api/tags":
items = body.models
elif endpoint == "/v1/models":
items = body.data
else:
items = []
for m in items:
if endpoint == "/v1/models":
if endpoint == "/v1/models":
for m in body:
key = m.id
else:
seen[key] = m
elif endpoint == "/api/tags":
for m in body.models:
key = m.model
seen[key] = m
seen[key] = m
ordered = [seen[k] for k in sorted(seen.keys())]
canonical = records[0]
canonical_req = canonical.get("request", {})
if isinstance(canonical_req, dict):
canonical_req["endpoint"] = endpoint
if endpoint == "/v1/models":
body = {"data": ordered, "object": "list"}
else:
body = ordered
if endpoint == "/api/tags":
from ollama import ListResponse
body = ListResponse(models=ordered)
@ -251,12 +242,17 @@ async def _patched_inference_method(original_method, self, client_type, endpoint
global _current_mode, _current_storage
if _current_mode == InferenceMode.LIVE or _current_storage is None:
# Normal operation
return await original_method(self, *args, **kwargs)
if endpoint == "/v1/models":
return original_method(self, *args, **kwargs)
else:
return await original_method(self, *args, **kwargs)
# Get base URL based on client type
if client_type == "openai":
base_url = str(self._client.base_url)
# the OpenAI client methods may pass NOT_GIVEN for unset parameters; filter these out
kwargs = {k: v for k, v in kwargs.items() if v is not NOT_GIVEN}
elif client_type == "ollama":
# Get base URL from the client (Ollama client uses host attribute)
base_url = getattr(self, "host", "http://localhost:11434")
@ -300,7 +296,14 @@ async def _patched_inference_method(original_method, self, client_type, endpoint
)
elif _current_mode == InferenceMode.RECORD:
response = await original_method(self, *args, **kwargs)
if endpoint == "/v1/models":
response = original_method(self, *args, **kwargs)
else:
response = await original_method(self, *args, **kwargs)
# we want to store the result of the iterator, not the iterator itself
if endpoint == "/v1/models":
response = [m async for m in response]
request_data = {
"method": method,
@ -380,10 +383,14 @@ def patch_inference_clients():
_original_methods["embeddings_create"], self, "openai", "/v1/embeddings", *args, **kwargs
)
async def patched_models_list(self, *args, **kwargs):
return await _patched_inference_method(
_original_methods["models_list"], self, "openai", "/v1/models", *args, **kwargs
)
def patched_models_list(self, *args, **kwargs):
async def _iter():
for item in await _patched_inference_method(
_original_methods["models_list"], self, "openai", "/v1/models", *args, **kwargs
):
yield item
return _iter()
# Apply OpenAI patches
AsyncChatCompletions.create = patched_chat_completions_create

View file

@ -11,7 +11,7 @@
"@radix-ui/react-collapsible": "^1.1.12",
"@radix-ui/react-dialog": "^1.1.13",
"@radix-ui/react-dropdown-menu": "^2.1.16",
"@radix-ui/react-select": "^2.2.5",
"@radix-ui/react-select": "^2.2.6",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-tooltip": "^1.2.8",
@ -20,7 +20,7 @@
"framer-motion": "^12.23.12",
"llama-stack-client": "^0.2.21",
"lucide-react": "^0.542.0",
"next": "15.3.3",
"next": "15.5.3",
"next-auth": "^4.24.11",
"next-themes": "^0.4.6",
"react": "^19.0.0",
@ -664,9 +664,9 @@
}
},
"node_modules/@emnapi/runtime": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.4.3.tgz",
"integrity": "sha512-pBPWdu6MLKROBX05wSNKcNb++m5Er+KQ9QkB+WVM+pW2Kx9hoSrVTnu3BdkI5eBLZoKu/J6mW/B6i6bJB2ytXQ==",
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.5.0.tgz",
"integrity": "sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==",
"license": "MIT",
"optional": true,
"dependencies": {
@ -927,9 +927,9 @@
}
},
"node_modules/@img/sharp-darwin-arm64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.1.tgz",
"integrity": "sha512-pn44xgBtgpEbZsu+lWf2KNb6OAf70X68k+yk69Ic2Xz11zHR/w24/U49XT7AeRwJ0Px+mhALhU5LPci1Aymk7A==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.3.tgz",
"integrity": "sha512-ryFMfvxxpQRsgZJqBd4wsttYQbCxsJksrv9Lw/v798JcQ8+w84mBWuXwl+TT0WJ/WrYOLaYpwQXi3sA9nTIaIg==",
"cpu": [
"arm64"
],
@ -945,13 +945,13 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-darwin-arm64": "1.1.0"
"@img/sharp-libvips-darwin-arm64": "1.2.0"
}
},
"node_modules/@img/sharp-darwin-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.1.tgz",
"integrity": "sha512-VfuYgG2r8BpYiOUN+BfYeFo69nP/MIwAtSJ7/Zpxc5QF3KS22z8Pvg3FkrSFJBPNQ7mmcUcYQFBmEQp7eu1F8Q==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.3.tgz",
"integrity": "sha512-yHpJYynROAj12TA6qil58hmPmAwxKKC7reUqtGLzsOHfP7/rniNGTL8tjWX6L3CTV4+5P4ypcS7Pp+7OB+8ihA==",
"cpu": [
"x64"
],
@ -967,13 +967,13 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-darwin-x64": "1.1.0"
"@img/sharp-libvips-darwin-x64": "1.2.0"
}
},
"node_modules/@img/sharp-libvips-darwin-arm64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.1.0.tgz",
"integrity": "sha512-HZ/JUmPwrJSoM4DIQPv/BfNh9yrOA8tlBbqbLz4JZ5uew2+o22Ik+tHQJcih7QJuSa0zo5coHTfD5J8inqj9DA==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.0.tgz",
"integrity": "sha512-sBZmpwmxqwlqG9ueWFXtockhsxefaV6O84BMOrhtg/YqbTaRdqDE7hxraVE3y6gVM4eExmfzW4a8el9ArLeEiQ==",
"cpu": [
"arm64"
],
@ -987,9 +987,9 @@
}
},
"node_modules/@img/sharp-libvips-darwin-x64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.1.0.tgz",
"integrity": "sha512-Xzc2ToEmHN+hfvsl9wja0RlnXEgpKNmftriQp6XzY/RaSfwD9th+MSh0WQKzUreLKKINb3afirxW7A0fz2YWuQ==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.0.tgz",
"integrity": "sha512-M64XVuL94OgiNHa5/m2YvEQI5q2cl9d/wk0qFTDVXcYzi43lxuiFTftMR1tOnFQovVXNZJ5TURSDK2pNe9Yzqg==",
"cpu": [
"x64"
],
@ -1003,9 +1003,9 @@
}
},
"node_modules/@img/sharp-libvips-linux-arm": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.1.0.tgz",
"integrity": "sha512-s8BAd0lwUIvYCJyRdFqvsj+BJIpDBSxs6ivrOPm/R7piTs5UIwY5OjXrP2bqXC9/moGsyRa37eYWYCOGVXxVrA==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.0.tgz",
"integrity": "sha512-mWd2uWvDtL/nvIzThLq3fr2nnGfyr/XMXlq8ZJ9WMR6PXijHlC3ksp0IpuhK6bougvQrchUAfzRLnbsen0Cqvw==",
"cpu": [
"arm"
],
@ -1019,9 +1019,9 @@
}
},
"node_modules/@img/sharp-libvips-linux-arm64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.1.0.tgz",
"integrity": "sha512-IVfGJa7gjChDET1dK9SekxFFdflarnUB8PwW8aGwEoF3oAsSDuNUTYS+SKDOyOJxQyDC1aPFMuRYLoDInyV9Ew==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.0.tgz",
"integrity": "sha512-RXwd0CgG+uPRX5YYrkzKyalt2OJYRiJQ8ED/fi1tq9WQW2jsQIn0tqrlR5l5dr/rjqq6AHAxURhj2DVjyQWSOA==",
"cpu": [
"arm64"
],
@ -1035,9 +1035,9 @@
}
},
"node_modules/@img/sharp-libvips-linux-ppc64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.1.0.tgz",
"integrity": "sha512-tiXxFZFbhnkWE2LA8oQj7KYR+bWBkiV2nilRldT7bqoEZ4HiDOcePr9wVDAZPi/Id5fT1oY9iGnDq20cwUz8lQ==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.0.tgz",
"integrity": "sha512-Xod/7KaDDHkYu2phxxfeEPXfVXFKx70EAFZ0qyUdOjCcxbjqyJOEUpDe6RIyaunGxT34Anf9ue/wuWOqBW2WcQ==",
"cpu": [
"ppc64"
],
@ -1051,9 +1051,9 @@
}
},
"node_modules/@img/sharp-libvips-linux-s390x": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.1.0.tgz",
"integrity": "sha512-xukSwvhguw7COyzvmjydRb3x/09+21HykyapcZchiCUkTThEQEOMtBj9UhkaBRLuBrgLFzQ2wbxdeCCJW/jgJA==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.0.tgz",
"integrity": "sha512-eMKfzDxLGT8mnmPJTNMcjfO33fLiTDsrMlUVcp6b96ETbnJmd4uvZxVJSKPQfS+odwfVaGifhsB07J1LynFehw==",
"cpu": [
"s390x"
],
@ -1067,9 +1067,9 @@
}
},
"node_modules/@img/sharp-libvips-linux-x64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.1.0.tgz",
"integrity": "sha512-yRj2+reB8iMg9W5sULM3S74jVS7zqSzHG3Ol/twnAAkAhnGQnpjj6e4ayUz7V+FpKypwgs82xbRdYtchTTUB+Q==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.0.tgz",
"integrity": "sha512-ZW3FPWIc7K1sH9E3nxIGB3y3dZkpJlMnkk7z5tu1nSkBoCgw2nSRTFHI5pB/3CQaJM0pdzMF3paf9ckKMSE9Tg==",
"cpu": [
"x64"
],
@ -1083,9 +1083,9 @@
}
},
"node_modules/@img/sharp-libvips-linuxmusl-arm64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.1.0.tgz",
"integrity": "sha512-jYZdG+whg0MDK+q2COKbYidaqW/WTz0cc1E+tMAusiDygrM4ypmSCjOJPmFTvHHJ8j/6cAGyeDWZOsK06tP33w==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.0.tgz",
"integrity": "sha512-UG+LqQJbf5VJ8NWJ5Z3tdIe/HXjuIdo4JeVNADXBFuG7z9zjoegpzzGIyV5zQKi4zaJjnAd2+g2nna8TZvuW9Q==",
"cpu": [
"arm64"
],
@ -1099,9 +1099,9 @@
}
},
"node_modules/@img/sharp-libvips-linuxmusl-x64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.1.0.tgz",
"integrity": "sha512-wK7SBdwrAiycjXdkPnGCPLjYb9lD4l6Ze2gSdAGVZrEL05AOUJESWU2lhlC+Ffn5/G+VKuSm6zzbQSzFX/P65A==",
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.0.tgz",
"integrity": "sha512-SRYOLR7CXPgNze8akZwjoGBoN1ThNZoqpOgfnOxmWsklTGVfJiGJoC/Lod7aNMGA1jSsKWM1+HRX43OP6p9+6Q==",
"cpu": [
"x64"
],
@ -1115,9 +1115,9 @@
}
},
"node_modules/@img/sharp-linux-arm": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.1.tgz",
"integrity": "sha512-anKiszvACti2sGy9CirTlNyk7BjjZPiML1jt2ZkTdcvpLU1YH6CXwRAZCA2UmRXnhiIftXQ7+Oh62Ji25W72jA==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.3.tgz",
"integrity": "sha512-oBK9l+h6KBN0i3dC8rYntLiVfW8D8wH+NPNT3O/WBHeW0OQWCjfWksLUaPidsrDKpJgXp3G3/hkmhptAW0I3+A==",
"cpu": [
"arm"
],
@ -1133,13 +1133,13 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-arm": "1.1.0"
"@img/sharp-libvips-linux-arm": "1.2.0"
}
},
"node_modules/@img/sharp-linux-arm64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.1.tgz",
"integrity": "sha512-kX2c+vbvaXC6vly1RDf/IWNXxrlxLNpBVWkdpRq5Ka7OOKj6nr66etKy2IENf6FtOgklkg9ZdGpEu9kwdlcwOQ==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.3.tgz",
"integrity": "sha512-QdrKe3EvQrqwkDrtuTIjI0bu6YEJHTgEeqdzI3uWJOH6G1O8Nl1iEeVYRGdj1h5I21CqxSvQp1Yv7xeU3ZewbA==",
"cpu": [
"arm64"
],
@ -1155,13 +1155,35 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-arm64": "1.1.0"
"@img/sharp-libvips-linux-arm64": "1.2.0"
}
},
"node_modules/@img/sharp-linux-ppc64": {
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.3.tgz",
"integrity": "sha512-GLtbLQMCNC5nxuImPR2+RgrviwKwVql28FWZIW1zWruy6zLgA5/x2ZXk3mxj58X/tszVF69KK0Is83V8YgWhLA==",
"cpu": [
"ppc64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-ppc64": "1.2.0"
}
},
"node_modules/@img/sharp-linux-s390x": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.1.tgz",
"integrity": "sha512-7s0KX2tI9mZI2buRipKIw2X1ufdTeaRgwmRabt5bi9chYfhur+/C1OXg3TKg/eag1W+6CCWLVmSauV1owmRPxA==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.3.tgz",
"integrity": "sha512-3gahT+A6c4cdc2edhsLHmIOXMb17ltffJlxR0aC2VPZfwKoTGZec6u5GrFgdR7ciJSsHT27BD3TIuGcuRT0KmQ==",
"cpu": [
"s390x"
],
@ -1177,13 +1199,13 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-s390x": "1.1.0"
"@img/sharp-libvips-linux-s390x": "1.2.0"
}
},
"node_modules/@img/sharp-linux-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.1.tgz",
"integrity": "sha512-wExv7SH9nmoBW3Wr2gvQopX1k8q2g5V5Iag8Zk6AVENsjwd+3adjwxtp3Dcu2QhOXr8W9NusBU6XcQUohBZ5MA==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.3.tgz",
"integrity": "sha512-8kYso8d806ypnSq3/Ly0QEw90V5ZoHh10yH0HnrzOCr6DKAPI6QVHvwleqMkVQ0m+fc7EH8ah0BB0QPuWY6zJQ==",
"cpu": [
"x64"
],
@ -1199,13 +1221,13 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-x64": "1.1.0"
"@img/sharp-libvips-linux-x64": "1.2.0"
}
},
"node_modules/@img/sharp-linuxmusl-arm64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.1.tgz",
"integrity": "sha512-DfvyxzHxw4WGdPiTF0SOHnm11Xv4aQexvqhRDAoD00MzHekAj9a/jADXeXYCDFH/DzYruwHbXU7uz+H+nWmSOQ==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.3.tgz",
"integrity": "sha512-vAjbHDlr4izEiXM1OTggpCcPg9tn4YriK5vAjowJsHwdBIdx0fYRsURkxLG2RLm9gyBq66gwtWI8Gx0/ov+JKQ==",
"cpu": [
"arm64"
],
@ -1221,13 +1243,13 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linuxmusl-arm64": "1.1.0"
"@img/sharp-libvips-linuxmusl-arm64": "1.2.0"
}
},
"node_modules/@img/sharp-linuxmusl-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.1.tgz",
"integrity": "sha512-pax/kTR407vNb9qaSIiWVnQplPcGU8LRIJpDT5o8PdAx5aAA7AS3X9PS8Isw1/WfqgQorPotjrZL3Pqh6C5EBg==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.3.tgz",
"integrity": "sha512-gCWUn9547K5bwvOn9l5XGAEjVTTRji4aPTqLzGXHvIr6bIDZKNTA34seMPgM0WmSf+RYBH411VavCejp3PkOeQ==",
"cpu": [
"x64"
],
@ -1243,20 +1265,20 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linuxmusl-x64": "1.1.0"
"@img/sharp-libvips-linuxmusl-x64": "1.2.0"
}
},
"node_modules/@img/sharp-wasm32": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.1.tgz",
"integrity": "sha512-YDybQnYrLQfEpzGOQe7OKcyLUCML4YOXl428gOOzBgN6Gw0rv8dpsJ7PqTHxBnXnwXr8S1mYFSLSa727tpz0xg==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.3.tgz",
"integrity": "sha512-+CyRcpagHMGteySaWos8IbnXcHgfDn7pO2fiC2slJxvNq9gDipYBN42/RagzctVRKgxATmfqOSulgZv5e1RdMg==",
"cpu": [
"wasm32"
],
"license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
"optional": true,
"dependencies": {
"@emnapi/runtime": "^1.4.0"
"@emnapi/runtime": "^1.4.4"
},
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
@ -1265,10 +1287,29 @@
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-arm64": {
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.3.tgz",
"integrity": "sha512-MjnHPnbqMXNC2UgeLJtX4XqoVHHlZNd+nPt1kRPmj63wURegwBhZlApELdtxM2OIZDRv/DFtLcNhVbd1z8GYXQ==",
"cpu": [
"arm64"
],
"license": "Apache-2.0 AND LGPL-3.0-or-later",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-ia32": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.1.tgz",
"integrity": "sha512-WKf/NAZITnonBf3U1LfdjoMgNO5JYRSlhovhRhMxXVdvWYveM4kM3L8m35onYIdh75cOMCo1BexgVQcCDzyoWw==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.3.tgz",
"integrity": "sha512-xuCdhH44WxuXgOM714hn4amodJMZl3OEvf0GVTm0BEyMeA2to+8HEdRPShH0SLYptJY1uBw+SCFP9WVQi1Q/cw==",
"cpu": [
"ia32"
],
@ -1285,9 +1326,9 @@
}
},
"node_modules/@img/sharp-win32-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.1.tgz",
"integrity": "sha512-hw1iIAHpNE8q3uMIRCgGOeDoz9KtFNarFLQclLxr/LK1VBkj8nby18RjFvr6aP7USRYAjTZW6yisnBWMX571Tw==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.3.tgz",
"integrity": "sha512-OWwz05d++TxzLEv4VnsTz5CmZ6mI6S05sfQGEMrNrQcOEERbX46332IvE7pO/EUiw7jUrrS40z/M7kPyjfl04g==",
"cpu": [
"x64"
],
@ -1849,9 +1890,10 @@
}
},
"node_modules/@next/env": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/env/-/env-15.3.3.tgz",
"integrity": "sha512-OdiMrzCl2Xi0VTjiQQUK0Xh7bJHnOuET2s+3V+Y40WJBAXrJeGA3f+I8MZJ/YQ3mVGi5XGR1L66oFlgqXhQ4Vw=="
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.3.tgz",
"integrity": "sha512-RSEDTRqyihYXygx/OJXwvVupfr9m04+0vH8vyy0HfZ7keRto6VX9BbEk0J2PUk0VGy6YhklJUSrgForov5F9pw==",
"license": "MIT"
},
"node_modules/@next/eslint-plugin-next": {
"version": "15.5.2",
@ -1864,12 +1906,13 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.3.3.tgz",
"integrity": "sha512-WRJERLuH+O3oYB4yZNVahSVFmtxRNjNF1I1c34tYMoJb0Pve+7/RaLAJJizyYiFhjYNGHRAE1Ri2Fd23zgDqhg==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.3.tgz",
"integrity": "sha512-nzbHQo69+au9wJkGKTU9lP7PXv0d1J5ljFpvb+LnEomLtSbJkbZyEs6sbF3plQmiOB2l9OBtN2tNSvCH1nQ9Jg==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"darwin"
@ -1879,12 +1922,13 @@
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.3.3.tgz",
"integrity": "sha512-XHdzH/yBc55lu78k/XwtuFR/ZXUTcflpRXcsu0nKmF45U96jt1tsOZhVrn5YH+paw66zOANpOnFQ9i6/j+UYvw==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.3.tgz",
"integrity": "sha512-w83w4SkOOhekJOcA5HBvHyGzgV1W/XvOfpkrxIse4uPWhYTTRwtGEM4v/jiXwNSJvfRvah0H8/uTLBKRXlef8g==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"darwin"
@ -1894,12 +1938,13 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.3.3.tgz",
"integrity": "sha512-VZ3sYL2LXB8znNGcjhocikEkag/8xiLgnvQts41tq6i+wql63SMS1Q6N8RVXHw5pEUjiof+II3HkDd7GFcgkzw==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.3.tgz",
"integrity": "sha512-+m7pfIs0/yvgVu26ieaKrifV8C8yiLe7jVp9SpcIzg7XmyyNE7toC1fy5IOQozmr6kWl/JONC51osih2RyoXRw==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
@ -1909,12 +1954,13 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.3.3.tgz",
"integrity": "sha512-h6Y1fLU4RWAp1HPNJWDYBQ+e3G7sLckyBXhmH9ajn8l/RSMnhbuPBV/fXmy3muMcVwoJdHL+UtzRzs0nXOf9SA==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.3.tgz",
"integrity": "sha512-u3PEIzuguSenoZviZJahNLgCexGFhso5mxWCrrIMdvpZn6lkME5vc/ADZG8UUk5K1uWRy4hqSFECrON6UKQBbQ==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
@ -1924,12 +1970,13 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.3.3.tgz",
"integrity": "sha512-jJ8HRiF3N8Zw6hGlytCj5BiHyG/K+fnTKVDEKvUCyiQ/0r5tgwO7OgaRiOjjRoIx2vwLR+Rz8hQoPrnmFbJdfw==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.3.tgz",
"integrity": "sha512-lDtOOScYDZxI2BENN9m0pfVPJDSuUkAD1YXSvlJF0DKwZt0WlA7T7o3wrcEr4Q+iHYGzEaVuZcsIbCps4K27sA==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
@ -1939,12 +1986,13 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.3.3.tgz",
"integrity": "sha512-HrUcTr4N+RgiiGn3jjeT6Oo208UT/7BuTr7K0mdKRBtTbT4v9zJqCDKO97DUqqoBK1qyzP1RwvrWTvU6EPh/Cw==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.3.tgz",
"integrity": "sha512-9vWVUnsx9PrY2NwdVRJ4dUURAQ8Su0sLRPqcCCxtX5zIQUBES12eRVHq6b70bbfaVaxIDGJN2afHui0eDm+cLg==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
@ -1954,12 +2002,13 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.3.3.tgz",
"integrity": "sha512-SxorONgi6K7ZUysMtRF3mIeHC5aA3IQLmKFQzU0OuhuUYwpOBc1ypaLJLP5Bf3M9k53KUUUj4vTPwzGvl/NwlQ==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.3.tgz",
"integrity": "sha512-1CU20FZzY9LFQigRi6jM45oJMU3KziA5/sSG+dXeVaTm661snQP6xu3ykGxxwU5sLG3sh14teO/IOEPVsQMRfA==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"win32"
@ -1969,12 +2018,13 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.3.3.tgz",
"integrity": "sha512-4QZG6F8enl9/S2+yIiOiju0iCTFd93d8VC1q9LZS4p/Xuk81W2QDjCFeoogmrWWkAD59z8ZxepBQap2dKS5ruw==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.3.tgz",
"integrity": "sha512-JMoLAq3n3y5tKXPQwCK5c+6tmwkuFDa2XAxz8Wm4+IVthdBZdZGh+lmiLUHg9f9IDwIQpUjp+ysd6OkYTyZRZw==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"win32"
@ -2874,22 +2924,22 @@
}
},
"node_modules/@radix-ui/react-select": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.5.tgz",
"integrity": "sha512-HnMTdXEVuuyzx63ME0ut4+sEMYW6oouHWNGUZc7ddvUWIcfCva/AMoqEW/3wnEllriMWBa0RHspCYnfCWJQYmA==",
"version": "2.2.6",
"resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.6.tgz",
"integrity": "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==",
"license": "MIT",
"dependencies": {
"@radix-ui/number": "1.1.1",
"@radix-ui/primitive": "1.1.2",
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-collection": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-direction": "1.1.1",
"@radix-ui/react-dismissable-layer": "1.1.10",
"@radix-ui/react-focus-guards": "1.1.2",
"@radix-ui/react-dismissable-layer": "1.1.11",
"@radix-ui/react-focus-guards": "1.1.3",
"@radix-ui/react-focus-scope": "1.1.7",
"@radix-ui/react-id": "1.1.1",
"@radix-ui/react-popper": "1.2.7",
"@radix-ui/react-popper": "1.2.8",
"@radix-ui/react-portal": "1.1.9",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-slot": "1.2.3",
@ -2916,13 +2966,19 @@
}
}
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/primitive": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.3.tgz",
"integrity": "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==",
"license": "MIT"
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.1.10",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.10.tgz",
"integrity": "sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ==",
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.11.tgz",
"integrity": "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==",
"license": "MIT",
"dependencies": {
"@radix-ui/primitive": "1.1.2",
"@radix-ui/primitive": "1.1.3",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
@ -2943,6 +2999,21 @@
}
}
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-guards": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.3.tgz",
"integrity": "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==",
"license": "MIT",
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-focus-scope": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz",
@ -2968,38 +3039,6 @@
}
}
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-popper": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.7.tgz",
"integrity": "sha512-IUFAccz1JyKcf/RjB552PlWwxjeCJB8/4KxT7EhBHOJM+mN7LdW+B3kacJXILm32xawcMMjb2i0cIZpo+f9kiQ==",
"license": "MIT",
"dependencies": {
"@floating-ui/react-dom": "^2.0.0",
"@radix-ui/react-arrow": "1.1.7",
"@radix-ui/react-compose-refs": "1.1.2",
"@radix-ui/react-context": "1.1.2",
"@radix-ui/react-primitive": "2.1.3",
"@radix-ui/react-use-callback-ref": "1.1.1",
"@radix-ui/react-use-layout-effect": "1.1.1",
"@radix-ui/react-use-rect": "1.1.1",
"@radix-ui/react-use-size": "1.1.1",
"@radix-ui/rect": "1.1.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-portal": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz",
@ -3547,12 +3586,6 @@
"@sinonjs/commons": "^3.0.0"
}
},
"node_modules/@swc/counter": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
"integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
"license": "Apache-2.0"
},
"node_modules/@swc/helpers": {
"version": "0.5.15",
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz",
@ -5475,17 +5508,6 @@
"dev": true,
"license": "MIT"
},
"node_modules/busboy": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
"integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
"dependencies": {
"streamsearch": "^1.1.0"
},
"engines": {
"node": ">=10.16.0"
}
},
"node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
@ -8295,9 +8317,9 @@
}
},
"node_modules/is-arrayish": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==",
"version": "0.3.4",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.4.tgz",
"integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==",
"license": "MIT",
"optional": true
},
@ -11542,14 +11564,13 @@
}
},
"node_modules/next": {
"version": "15.3.3",
"resolved": "https://registry.npmjs.org/next/-/next-15.3.3.tgz",
"integrity": "sha512-JqNj29hHNmCLtNvd090SyRbXJiivQ+58XjCcrC50Crb5g5u2zi7Y2YivbsEfzk6AtVI80akdOQbaMZwWB1Hthw==",
"version": "15.5.3",
"resolved": "https://registry.npmjs.org/next/-/next-15.5.3.tgz",
"integrity": "sha512-r/liNAx16SQj4D+XH/oI1dlpv9tdKJ6cONYPwwcCC46f2NjpaRWY+EKCzULfgQYV6YKXjHBchff2IZBSlZmJNw==",
"license": "MIT",
"dependencies": {
"@next/env": "15.3.3",
"@swc/counter": "0.1.3",
"@next/env": "15.5.3",
"@swc/helpers": "0.5.15",
"busboy": "1.6.0",
"caniuse-lite": "^1.0.30001579",
"postcss": "8.4.31",
"styled-jsx": "5.1.6"
@ -11561,19 +11582,19 @@
"node": "^18.18.0 || ^19.8.0 || >= 20.0.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "15.3.3",
"@next/swc-darwin-x64": "15.3.3",
"@next/swc-linux-arm64-gnu": "15.3.3",
"@next/swc-linux-arm64-musl": "15.3.3",
"@next/swc-linux-x64-gnu": "15.3.3",
"@next/swc-linux-x64-musl": "15.3.3",
"@next/swc-win32-arm64-msvc": "15.3.3",
"@next/swc-win32-x64-msvc": "15.3.3",
"sharp": "^0.34.1"
"@next/swc-darwin-arm64": "15.5.3",
"@next/swc-darwin-x64": "15.5.3",
"@next/swc-linux-arm64-gnu": "15.5.3",
"@next/swc-linux-arm64-musl": "15.5.3",
"@next/swc-linux-x64-gnu": "15.5.3",
"@next/swc-linux-x64-musl": "15.5.3",
"@next/swc-win32-arm64-msvc": "15.5.3",
"@next/swc-win32-x64-msvc": "15.5.3",
"sharp": "^0.34.3"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
"@playwright/test": "^1.41.2",
"@playwright/test": "^1.51.1",
"babel-plugin-react-compiler": "*",
"react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
"react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0",
@ -13240,16 +13261,16 @@
"license": "ISC"
},
"node_modules/sharp": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.1.tgz",
"integrity": "sha512-1j0w61+eVxu7DawFJtnfYcvSv6qPFvfTaqzTQ2BLknVhHTwGS8sc63ZBF4rzkWMBVKybo4S5OBtDdZahh2A1xg==",
"version": "0.34.3",
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.3.tgz",
"integrity": "sha512-eX2IQ6nFohW4DbvHIOLRB3MHFpYqaqvXd3Tp5e/T/dSH83fxaNJQRvDMhASmkNTsNTVF2/OOopzRCt7xokgPfg==",
"hasInstallScript": true,
"license": "Apache-2.0",
"optional": true,
"dependencies": {
"color": "^4.2.3",
"detect-libc": "^2.0.3",
"semver": "^7.7.1"
"detect-libc": "^2.0.4",
"semver": "^7.7.2"
},
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
@ -13258,26 +13279,28 @@
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-darwin-arm64": "0.34.1",
"@img/sharp-darwin-x64": "0.34.1",
"@img/sharp-libvips-darwin-arm64": "1.1.0",
"@img/sharp-libvips-darwin-x64": "1.1.0",
"@img/sharp-libvips-linux-arm": "1.1.0",
"@img/sharp-libvips-linux-arm64": "1.1.0",
"@img/sharp-libvips-linux-ppc64": "1.1.0",
"@img/sharp-libvips-linux-s390x": "1.1.0",
"@img/sharp-libvips-linux-x64": "1.1.0",
"@img/sharp-libvips-linuxmusl-arm64": "1.1.0",
"@img/sharp-libvips-linuxmusl-x64": "1.1.0",
"@img/sharp-linux-arm": "0.34.1",
"@img/sharp-linux-arm64": "0.34.1",
"@img/sharp-linux-s390x": "0.34.1",
"@img/sharp-linux-x64": "0.34.1",
"@img/sharp-linuxmusl-arm64": "0.34.1",
"@img/sharp-linuxmusl-x64": "0.34.1",
"@img/sharp-wasm32": "0.34.1",
"@img/sharp-win32-ia32": "0.34.1",
"@img/sharp-win32-x64": "0.34.1"
"@img/sharp-darwin-arm64": "0.34.3",
"@img/sharp-darwin-x64": "0.34.3",
"@img/sharp-libvips-darwin-arm64": "1.2.0",
"@img/sharp-libvips-darwin-x64": "1.2.0",
"@img/sharp-libvips-linux-arm": "1.2.0",
"@img/sharp-libvips-linux-arm64": "1.2.0",
"@img/sharp-libvips-linux-ppc64": "1.2.0",
"@img/sharp-libvips-linux-s390x": "1.2.0",
"@img/sharp-libvips-linux-x64": "1.2.0",
"@img/sharp-libvips-linuxmusl-arm64": "1.2.0",
"@img/sharp-libvips-linuxmusl-x64": "1.2.0",
"@img/sharp-linux-arm": "0.34.3",
"@img/sharp-linux-arm64": "0.34.3",
"@img/sharp-linux-ppc64": "0.34.3",
"@img/sharp-linux-s390x": "0.34.3",
"@img/sharp-linux-x64": "0.34.3",
"@img/sharp-linuxmusl-arm64": "0.34.3",
"@img/sharp-linuxmusl-x64": "0.34.3",
"@img/sharp-wasm32": "0.34.3",
"@img/sharp-win32-arm64": "0.34.3",
"@img/sharp-win32-ia32": "0.34.3",
"@img/sharp-win32-x64": "0.34.3"
}
},
"node_modules/shebang-command": {
@ -13403,9 +13426,9 @@
"license": "ISC"
},
"node_modules/simple-swizzle": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
"integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==",
"version": "0.2.4",
"resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.4.tgz",
"integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==",
"license": "MIT",
"optional": true,
"dependencies": {
@ -13526,14 +13549,6 @@
"node": ">= 0.8"
}
},
"node_modules/streamsearch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
"integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/string-length": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",

View file

@ -16,7 +16,7 @@
"@radix-ui/react-collapsible": "^1.1.12",
"@radix-ui/react-dialog": "^1.1.13",
"@radix-ui/react-dropdown-menu": "^2.1.16",
"@radix-ui/react-select": "^2.2.5",
"@radix-ui/react-select": "^2.2.6",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-tooltip": "^1.2.8",
@ -25,7 +25,7 @@
"framer-motion": "^12.23.12",
"llama-stack-client": "^0.2.21",
"lucide-react": "^0.542.0",
"next": "15.3.3",
"next": "15.5.3",
"next-auth": "^4.24.11",
"next-themes": "^0.4.6",
"react": "^19.0.0",

View file

@ -354,6 +354,7 @@ warn_required_dynamic_aliases = true
classmethod-decorators = ["classmethod", "pydantic.field_validator"]

[tool.pytest.ini_options]
addopts = ["--durations=10"]
asyncio_mode = "auto"
markers = [
    "allow_network: Allow network access for specific unit tests",

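The `asyncio_mode = "auto"` addition means pytest-asyncio collects and awaits bare `async def` tests without a per-test marker. A minimal illustrative test that relies on auto mode:

```python
import asyncio

async def _fetch_answer() -> int:
    await asyncio.sleep(0)  # stand-in for real async work
    return 42

async def test_fetch_answer():
    # Under asyncio_mode = "auto" this bare async test is awaited by
    # pytest-asyncio with no @pytest.mark.asyncio marker needed.
    assert await _fetch_answer() == 42
```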
View file

@ -48,7 +48,6 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
        "remote::nvidia",
        "remote::runpod",
        "remote::sambanova",
        "remote::tgi",
        "remote::vertexai",
        # {"error":{"message":"Unknown request URL: GET /openai/v1/completions. Please check the URL for typos,
        # or see the docs at https://console.groq.com/docs/","type":"invalid_request_error","code":"unknown_url"}}
@ -59,6 +58,7 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
        # does not work with the specified model, gpt-5-mini. Please choose different model and try
        # again. You can learn more about which models can be used with each operation here:
        # https://go.microsoft.com/fwlink/?linkid=2197993.'}}"}
        "remote::watsonx",  # returns 404 when hitting the /openai/v1 endpoint
    ):
        pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support OpenAI completions.")
@ -96,6 +96,7 @@ def skip_if_doesnt_support_n(client_with_models, model_id)
        "remote::vertexai",
        # Error code: 400 - [{'error': {'code': 400, 'message': 'Unable to submit request because candidateCount must be 1 but
        # the entered value was 2. Update the candidateCount value and try again.', 'status': 'INVALID_ARGUMENT'}
        "remote::tgi",  # TGI ignores n param silently
    ):
        pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support n param.")
@ -110,7 +111,7 @@ def skip_if_model_doesnt_support_openai_chat_completion(client_with_models, mode
        "remote::cerebras",
        "remote::databricks",
        "remote::runpod",
        "remote::tgi",
        "remote::watsonx",  # watsonx returns 404 when hitting the /openai/v1 endpoint
    ):
        pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support OpenAI chat completions.")
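
These skip helpers all share the same provider lookup (visible in the next hunk). A generic form of that pattern, with illustrative names and assuming the models listing exposes an `identifier` attribute, might look like:

```python
import pytest

def skip_if_provider_unsupported(client_with_models, model_id, unsupported, feature):
    # Resolve the model's provider, then skip when its type is on the
    # deny list. Mirrors the helpers above; names are illustrative.
    models = {m.identifier: m for m in client_with_models.models.list()}
    provider_id = models[model_id].provider_id
    providers = {p.provider_id: p for p in client_with_models.providers.list()}
    provider = providers[provider_id]
    if provider.provider_type in unsupported:
        pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support {feature}.")
```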

View file

@ -45,7 +45,7 @@ def skip_if_model_doesnt_support_json_schema_structured_output(client_with_model
    provider_id = models[model_id].provider_id
    providers = {p.provider_id: p for p in client_with_models.providers.list()}
    provider = providers[provider_id]
    if provider.provider_type in ("remote::sambanova", "remote::azure"):
    if provider.provider_type in ("remote::sambanova", "remote::azure", "remote::watsonx"):
        pytest.skip(
            f"Model {model_id} hosted by {provider.provider_type} doesn't support json_schema structured output"
        )
@ -211,6 +211,7 @@ def test_text_completion_log_probs_streaming(client_with_models, text_model_id,
)
def test_text_completion_structured_output(client_with_models, text_model_id, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    skip_if_model_doesnt_support_json_schema_structured_output(client_with_models, text_model_id)

    class AnswerFormat(BaseModel):
        name: str
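
For context on what the watsonx skip guards: a json_schema structured-output request built from the `AnswerFormat` model might look like the sketch below. The client setup, endpoint, and model name are placeholders, and this assumes an OpenAI-compatible server is reachable; it is not the test's actual request code.

```python
from openai import OpenAI
from pydantic import BaseModel

class AnswerFormat(BaseModel):
    name: str

client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")  # placeholder endpoint

response = client.chat.completions.create(
    model="my-model",  # placeholder
    messages=[{"role": "user", "content": "Who wrote Hamlet? Answer as JSON."}],
    response_format={
        "type": "json_schema",
        "json_schema": {"name": "answer", "schema": AnswerFormat.model_json_schema()},
    },
)
print(response.choices[0].message.content)
```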

View file

@ -0,0 +1,56 @@
{
"request": {
"method": "POST",
"url": "http://localhost:8080/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "Qwen/Qwen3-0.6B",
"messages": [
{
"role": "user",
"content": "Hello, world!"
}
],
"stream": false
},
"endpoint": "/v1/chat/completions",
"model": "Qwen/Qwen3-0.6B"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "<think>\nOkay, the user just said \"Hello, world!\" so I need to respond in a friendly way. My prompt says to respond in the same style, so I should start with \"Hello, world!\" but maybe add some helpful information. Let me think. Since the user is probably testing or just sharing, a simple \"Hello, world!\" with a question would be best for user interaction. I'll make sure to keep it positive and open-ended.\n</think>\n\nHello, world! \ud83d\ude0a What do you need today?",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": null
}
}
],
"created": 1757550395,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": {
"completion_tokens": 108,
"prompt_tokens": 12,
"total_tokens": 120,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}
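
These recording fixtures wrap the response body in a `__type__`/`__data__` envelope, which suggests replay can rebuild the original client object from the serialized payload. A hedged sketch of that rehydration step (the file name and helper are illustrative, and it assumes the `openai` package's pydantic response models):

```python
import importlib
import json

def rehydrate(envelope: dict):
    # "__type__" holds a dotted class path such as
    # "openai.types.chat.chat_completion.ChatCompletion";
    # "__data__" holds the serialized payload for that pydantic model.
    module_path, _, class_name = envelope["__type__"].rpartition(".")
    cls = getattr(importlib.import_module(module_path), class_name)
    return cls.model_validate(envelope["__data__"])

with open("recording.json") as f:  # stand-in for one of the fixture files above
    record = json.load(f)
completion = rehydrate(record["response"]["body"])
print(completion.choices[0].message.content)
```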

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,56 @@
{
"request": {
"method": "POST",
"url": "http://localhost:8080/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "Qwen/Qwen3-0.6B",
"messages": [
{
"role": "user",
"content": "Which planet has rings around it with a name starting with letter S?"
}
],
"stream": false
},
"endpoint": "/v1/chat/completions",
"model": "Qwen/Qwen3-0.6B"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "<think>\nOkay, so the user is asking which planet has rings around it and its name starts with the letter S. Let me think... I know that the Sun is a star, not a planet. So the Moon is a natural satellite, which has the Moon's name and rings. But the Moon's name starts with M, not S. The Earth has the name Earth, but the rings aren't really around the Earth in any real sense. Mars has a thin ring of dust. Venus and Mercury don't have rings in the sense of planetary rings as we know. Wait, maybe the answer is the Moon, even though it's not the same as the name starting with S. But the question says a planet, so if there's a planet named S, that would be it. But actually, the only planet with rings is Jupiter. Wait, Jupiter has a famous system of rings. But why does the question mention a planet with a name starting with S? Maybe there's a trick. Let me double-check. Jupiter's name starts with J, so maybe the answer is Venus? But Venus doesn't have rings. Mercury, too, doesn't. The Moon, as a planet, a dwarf planet, and has rings. Despite the name, the rings are around it. So the answer would be the Moon. Therefore, the planet with rings and name starting with S is the Moon.\n</think>\n\nThe planet with rings around it and a name starting with the letter **S** is the **Moon**. Though its name doesn't start with an **S**, it is technically a dwarf planet and has the rings in its orbit. Oops Saturn!",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": null
}
}
],
"created": 1757550394,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": {
"completion_tokens": 336,
"prompt_tokens": 22,
"total_tokens": 358,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,366 @@
{
"request": {
"method": "POST",
"url": "http://localhost:8080/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "Qwen/Qwen3-0.6B",
"messages": [
{
"role": "user",
"content": "What's the weather in Tokyo? Use the get_weather function to get the weather."
}
],
"stream": true,
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the weather in a given city",
"parameters": {
"type": "object",
"properties": {
"city": {
"type": "string",
"description": "The city to get the weather for"
}
}
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "Qwen/Qwen3-0.6B"
},
"response": {
"body": [
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "{",
"name": "get_weather"
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": " \"",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "c",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "ity",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "\":",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": " \"",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "Tok",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "yo",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "0",
"function": {
"arguments": "\"}",
"name": null
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1757550392,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
}
],
"is_streaming": true
}
}
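
In the streaming recording above, the tool-call arguments arrive as string fragments spread across chunks; a client must concatenate the `arguments` deltas in order before parsing them as JSON. Using the exact fragments from this fixture:

```python
import json

# Fragments copied from the chunks above, in arrival order.
fragments = ["{", " \"", "c", "ity", "\":", " \"", "Tok", "yo", "\"}"]
arguments = "".join(fragments)
print(json.loads(arguments))  # {'city': 'Tokyo'}
```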

File diff suppressed because it is too large

View file

@ -0,0 +1,976 @@
{
"request": {
"method": "POST",
"url": "http://localhost:8080/v1/v1/completions",
"headers": {},
"body": {
"model": "Qwen/Qwen3-0.6B",
"prompt": "Respond to this question and explain your answer. Complete the sentence using one word: Roses are red, violets are ",
"max_tokens": 50,
"stream": true
},
"endpoint": "/v1/completions",
"model": "Qwen/Qwen3-0.6B"
},
"response": {
"body": [
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " several"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " several"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " times"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " more"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " popular"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " than"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " ____"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": ".\n"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "Answer"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": ":\n\n"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "The"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " roses"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " are"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " red"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": ","
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " v"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "io"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "lets"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " are"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " several"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " several"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " times"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " more"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " popular"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " than"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " **"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "numbers"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "**"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": ".\n\n"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "Explanation"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": ":"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " \""
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "se"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "veral"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " several"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " times"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " more"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " popular"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " than"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "\""
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " can"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " be"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " replaced"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " with"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " \""
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "numbers"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": "\""
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " as"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "",
"index": 0,
"logprobs": null,
"text": " the"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": null
}
},
{
"__type__": "openai.types.completion.Completion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"text": " number"
}
],
"created": 1757550367,
"model": "Qwen/Qwen3-0.6B",
"object": "text_completion",
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": {
"completion_tokens": 50,
"prompt_tokens": 25,
"total_tokens": 75,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
}
],
"is_streaming": true
}
}
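
The streamed fixture above stores one Completion object per generated token; only the final chunk reports a finish_reason ("length") and a populated usage block. A minimal sketch, assuming the chunks have been parsed back into openai Completion objects, of how a replayer could fold them into a single response:

    def reassemble_stream(chunks):
        # Concatenate the per-token text deltas in arrival order.
        text = "".join(chunk.choices[0].text for chunk in chunks)
        # Token accounting only appears on the last chunk of the stream.
        usage = chunks[-1].usage
        return text, usage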

View file

@ -0,0 +1,84 @@
{
"request": {
"method": "POST",
"url": "http://localhost:8080/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "Qwen/Qwen3-0.6B",
"messages": [
{
"role": "user",
"content": "What's the weather in Tokyo? Use the get_weather function to get the weather."
}
],
"stream": false,
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the weather in a given city",
"parameters": {
"type": "object",
"properties": {
"city": {
"type": "string",
"description": "The city to get the weather for"
}
}
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "Qwen/Qwen3-0.6B"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": null,
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": [
{
"id": "0",
"function": {
"arguments": "{\"city\":\"Tokyo\"}",
"name": "get_weather",
"description": null
},
"type": "function"
}
]
}
}
],
"created": 1757550396,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": {
"completion_tokens": 19,
"prompt_tokens": 239,
"total_tokens": 258,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}
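
For orientation, a hedged sketch of the client call that would produce a request shaped like the recording above, using the openai Python package against a local TGI endpoint (the base URL, model, and tool schema are taken from the fixture; the api_key value is a placeholder, since TGI does not check it):

    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8080/v1", api_key="unused")
    response = client.chat.completions.create(
        model="Qwen/Qwen3-0.6B",
        messages=[
            {
                "role": "user",
                "content": "What's the weather in Tokyo? Use the get_weather function to get the weather.",
            }
        ],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the weather in a given city",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "city": {
                                "type": "string",
                                "description": "The city to get the weather for",
                            }
                        },
                    },
                },
            }
        ],
    )
    # The recorded response carries the call in message.tool_calls, not content.
    print(response.choices[0].message.tool_calls)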

View file

@ -0,0 +1,56 @@
{
"request": {
"method": "POST",
"url": "http://localhost:8080/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "Qwen/Qwen3-0.6B",
"messages": [
{
"role": "user",
"content": "Which planet do humans live on?"
}
],
"stream": false
},
"endpoint": "/v1/chat/completions",
"model": "Qwen/Qwen3-0.6B"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "<think>\nOkay, the user is asking which planet humans live on. I need to make sure I answer this accurately. First, I should recall what I know about our solar system. The Earth is our home, and it's in our solar system. There are eight planets in total, right? Let me check that. Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune. Yep, that's the list.\n\nBut wait, the user might be confusing Earth with Mars. I should clarify that Earth is the only planet known to support life. The other planets are mostly gas giants and have no liquid water, so they don't support life as Earth does. So the answer should be Earth. I should also mention that although there are other planets, none have liquid water, which makes the answer more complete.\n\nI need to make sure there are no alternatives. Maybe some people might think Mars, but I know that's not the case. Also, it's good to mention that life on Earth is closely linked to the presence of water, which is why Earth is our only planet with that characteristic. That way, the answer is not only accurate but also informative.\n</think>\n\nHumans live on **Earth**, the planet that supports life as we know it. The Earth is the only known planet in our solar system where liquid water exists and where life can occur. Other planets are considered \"gas giants\" or \"ice giants\" due to their extreme conditions and lack of liquid water, making them inhospitable for life.",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": null
}
}
],
"created": 1757550390,
"model": "Qwen/Qwen3-0.6B",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "3.3.5-dev0-sha-1b90c50",
"usage": {
"completion_tokens": 312,
"prompt_tokens": 15,
"total_tokens": 327,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}
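
Qwen3 emits its chain of thought inside a <think>...</think> block before the user-facing answer, as the recorded content above shows. A small sketch, assuming at most one such block per message, for separating the two:

    import re

    def split_think(content: str) -> tuple[str, str]:
        # Returns (reasoning, answer), both stripped of surrounding whitespace.
        match = re.search(r"<think>(.*?)</think>", content, flags=re.DOTALL)
        if match is None:
            return "", content.strip()
        return match.group(1).strip(), content[match.end():].strip()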

File diff suppressed because it is too large

View file

@ -9,6 +9,7 @@ from pathlib import Path
import pandas as pd
import pytest
import requests
@pytest.fixture
@ -77,7 +78,46 @@ def test_scoring_functions_register(
assert len(list_response) > 0
assert any(x.identifier == sample_scoring_fn_id for x in list_response)
# TODO: add unregister api for scoring functions
def test_scoring_functions_unregister(
llama_stack_client,
sample_scoring_fn_id,
judge_model_id,
sample_judge_prompt_template,
):
llm_as_judge_provider = [
x
for x in llama_stack_client.providers.list()
if x.api == "scoring" and x.provider_type == "inline::llm-as-judge"
]
if len(llm_as_judge_provider) == 0:
pytest.skip("No llm-as-judge provider found, cannot test unregister")
llm_as_judge_provider_id = llm_as_judge_provider[0].provider_id
# Register first
register_scoring_function(
llama_stack_client,
llm_as_judge_provider_id,
sample_scoring_fn_id,
judge_model_id,
sample_judge_prompt_template,
)
# Ensure it is present
list_response = llama_stack_client.scoring_functions.list()
assert any(x.identifier == sample_scoring_fn_id for x in list_response)
# Unregister scoring fn
try:
base_url = llama_stack_client.base_url
except AttributeError:
pytest.skip("No server base_url available; cannot test HTTP unregister in library mode")
resp = requests.delete(f"{base_url}/v1/scoring-functions/{sample_scoring_fn_id}", timeout=30)
assert resp.status_code in (200, 204)
list_after = llama_stack_client.scoring_functions.list()
assert all(x.identifier != sample_scoring_fn_id for x in list_after)
@pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])

View file

@ -90,6 +90,16 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
"embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
},
),
"tgi": Setup(
name="tgi",
description="Text Generation Inference (TGI) provider with a text model",
env={
"TGI_URL": "http://localhost:8080",
},
defaults={
"text_model": "tgi/Qwen/Qwen3-0.6B",
},
),
}

View file

@ -105,6 +105,9 @@ class ScoringFunctionsImpl(Impl):
async def register_scoring_function(self, scoring_fn):
return scoring_fn
async def unregister_scoring_function(self, scoring_fn_id: str):
return scoring_fn_id
class BenchmarksImpl(Impl):
def __init__(self):
@ -113,6 +116,9 @@ class BenchmarksImpl(Impl):
async def register_benchmark(self, benchmark):
return benchmark
async def unregister_benchmark(self, benchmark_id: str):
return benchmark_id
class ToolGroupsImpl(Impl):
def __init__(self):
@ -330,6 +336,13 @@ async def test_scoring_functions_routing_table(cached_disk_dist_registry):
assert "test-scoring-fn" in scoring_fn_ids
assert "test-scoring-fn-2" in scoring_fn_ids
# Unregister scoring functions and verify listing
for i in range(len(scoring_functions.data)):
await table.unregister_scoring_function(scoring_functions.data[i].scoring_fn_id)
scoring_functions_list_after_deletion = await table.list_scoring_functions()
assert len(scoring_functions_list_after_deletion.data) == 0
async def test_benchmarks_routing_table(cached_disk_dist_registry):
table = BenchmarksRoutingTable({"test_provider": BenchmarksImpl()}, cached_disk_dist_registry, {})
@ -347,6 +360,15 @@ async def test_benchmarks_routing_table(cached_disk_dist_registry):
benchmark_ids = {b.identifier for b in benchmarks.data}
assert "test-benchmark" in benchmark_ids
# Unregister the benchmark and verify removal
await table.unregister_benchmark(benchmark_id="test-benchmark")
benchmarks_after = await table.list_benchmarks()
assert len(benchmarks_after.data) == 0
# Unregistering a non-existent benchmark should raise a clear error
with pytest.raises(ValueError, match="Benchmark 'dummy_benchmark' not found"):
await table.unregister_benchmark(benchmark_id="dummy_benchmark")
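
The error-path assertion above implies the routing table looks the benchmark up before deleting it. A hypothetical sketch of that shape (the method and helper names are illustrative, not the project's actual implementation):

    async def unregister_benchmark(self, benchmark_id: str) -> None:
        benchmark = await self.get_object_by_identifier("benchmark", benchmark_id)
        if benchmark is None:
            raise ValueError(f"Benchmark '{benchmark_id}' not found")
        await self.unregister_object(benchmark)
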
async def test_tool_groups_routing_table(cached_disk_dist_registry):
table = ToolGroupsRoutingTable({"test_provider": ToolGroupsImpl()}, cached_disk_dist_registry, {})

View file

@ -6,16 +6,18 @@
import tempfile
from pathlib import Path
from unittest.mock import patch
from unittest.mock import AsyncMock, Mock, patch
import pytest
from openai import AsyncOpenAI
from openai import NOT_GIVEN, AsyncOpenAI
from openai.types.model import Model as OpenAIModel
# Import the real Pydantic response types instead of using Mocks
from llama_stack.apis.inference import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,
OpenAICompletion,
OpenAIEmbeddingData,
OpenAIEmbeddingsResponse,
OpenAIEmbeddingUsage,
@ -153,24 +155,22 @@ class TestInferenceRecording:
async def test_recording_mode(self, temp_storage_dir, real_openai_chat_response):
"""Test that recording mode captures and stores responses."""
async def mock_create(*args, **kwargs):
return real_openai_chat_response
temp_storage_dir = temp_storage_dir / "test_recording_mode"
with patch("openai.resources.chat.completions.AsyncCompletions.create", side_effect=mock_create):
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.chat.completions._post = AsyncMock(return_value=real_openai_chat_response)
response = await client.chat.completions.create(
model="llama3.2:3b",
messages=[{"role": "user", "content": "Hello, how are you?"}],
temperature=0.7,
max_tokens=50,
)
response = await client.chat.completions.create(
model="llama3.2:3b",
messages=[{"role": "user", "content": "Hello, how are you?"}],
temperature=0.7,
max_tokens=50,
user=NOT_GIVEN,
)
# Verify the response was returned correctly
assert response.choices[0].message.content == "Hello! I'm doing well, thank you for asking."
# Verify the response was returned correctly
assert response.choices[0].message.content == "Hello! I'm doing well, thank you for asking."
client.chat.completions._post.assert_called_once()
# Verify recording was stored
storage = ResponseStorage(temp_storage_dir)
@ -178,40 +178,74 @@ class TestInferenceRecording:
async def test_replay_mode(self, temp_storage_dir, real_openai_chat_response):
"""Test that replay mode returns stored responses without making real calls."""
async def mock_create(*args, **kwargs):
return real_openai_chat_response
temp_storage_dir = temp_storage_dir / "test_replay_mode"
# First, record a response
with patch("openai.resources.chat.completions.AsyncCompletions.create", side_effect=mock_create):
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.chat.completions._post = AsyncMock(return_value=real_openai_chat_response)
response = await client.chat.completions.create(
model="llama3.2:3b",
messages=[{"role": "user", "content": "Hello, how are you?"}],
temperature=0.7,
max_tokens=50,
)
response = await client.chat.completions.create(
model="llama3.2:3b",
messages=[{"role": "user", "content": "Hello, how are you?"}],
temperature=0.7,
max_tokens=50,
user=NOT_GIVEN,
)
client.chat.completions._post.assert_called_once()
# Now test replay mode - should not call the original method
with patch("openai.resources.chat.completions.AsyncCompletions.create") as mock_create_patch:
with inference_recording(mode=InferenceMode.REPLAY, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
with inference_recording(mode=InferenceMode.REPLAY, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.chat.completions._post = AsyncMock(return_value=real_openai_chat_response)
response = await client.chat.completions.create(
model="llama3.2:3b",
messages=[{"role": "user", "content": "Hello, how are you?"}],
temperature=0.7,
max_tokens=50,
)
response = await client.chat.completions.create(
model="llama3.2:3b",
messages=[{"role": "user", "content": "Hello, how are you?"}],
temperature=0.7,
max_tokens=50,
)
# Verify we got the recorded response
assert response.choices[0].message.content == "Hello! I'm doing well, thank you for asking."
# Verify we got the recorded response
assert response.choices[0].message.content == "Hello! I'm doing well, thank you for asking."
# Verify the original method was NOT called
mock_create_patch.assert_not_called()
# Verify the original method was NOT called
client.chat.completions._post.assert_not_called()
async def test_replay_mode_models(self, temp_storage_dir):
"""Test that replay mode returns stored responses without making real model listing calls."""
async def _async_iterator(models):
for model in models:
yield model
models = [
OpenAIModel(id="foo", created=1, object="model", owned_by="test"),
OpenAIModel(id="bar", created=2, object="model", owned_by="test"),
]
expected_ids = {m.id for m in models}
temp_storage_dir = temp_storage_dir / "test_replay_mode_models"
# baseline - mock works without recording
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.models._get_api_list = Mock(return_value=_async_iterator(models))
assert {m.id async for m in client.models.list()} == expected_ids
client.models._get_api_list.assert_called_once()
# record the call
with inference_recording(mode=InferenceMode.RECORD, storage_dir=temp_storage_dir):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.models._get_api_list = Mock(return_value=_async_iterator(models))
assert {m.id async for m in client.models.list()} == expected_ids
client.models._get_api_list.assert_called_once()
# replay the call
with inference_recording(mode=InferenceMode.REPLAY, storage_dir=temp_storage_dir):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.models._get_api_list = Mock(return_value=_async_iterator(models))
assert {m.id async for m in client.models.list()} == expected_ids
client.models._get_api_list.assert_not_called()
async def test_replay_missing_recording(self, temp_storage_dir):
"""Test that replay mode fails when no recording is found."""
@ -228,36 +262,110 @@ class TestInferenceRecording:
async def test_embeddings_recording(self, temp_storage_dir, real_embeddings_response):
"""Test recording and replay of embeddings calls."""
async def mock_create(*args, **kwargs):
return real_embeddings_response
# baseline - mock works without recording
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.embeddings._post = AsyncMock(return_value=real_embeddings_response)
response = await client.embeddings.create(
model=real_embeddings_response.model,
input=["Hello world", "Test embedding"],
encoding_format=NOT_GIVEN,
)
assert len(response.data) == 2
assert response.data[0].embedding == [0.1, 0.2, 0.3]
client.embeddings._post.assert_called_once()
temp_storage_dir = temp_storage_dir / "test_embeddings_recording"
# Record
with patch("openai.resources.embeddings.AsyncEmbeddings.create", side_effect=mock_create):
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.embeddings._post = AsyncMock(return_value=real_embeddings_response)
response = await client.embeddings.create(
model="nomic-embed-text", input=["Hello world", "Test embedding"]
)
response = await client.embeddings.create(
model=real_embeddings_response.model,
input=["Hello world", "Test embedding"],
encoding_format=NOT_GIVEN,
dimensions=NOT_GIVEN,
user=NOT_GIVEN,
)
assert len(response.data) == 2
assert len(response.data) == 2
# Replay
with patch("openai.resources.embeddings.AsyncEmbeddings.create") as mock_create_patch:
with inference_recording(mode=InferenceMode.REPLAY, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
with inference_recording(mode=InferenceMode.REPLAY, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.embeddings._post = AsyncMock(return_value=real_embeddings_response)
response = await client.embeddings.create(
model="nomic-embed-text", input=["Hello world", "Test embedding"]
)
response = await client.embeddings.create(
model=real_embeddings_response.model,
input=["Hello world", "Test embedding"],
)
# Verify we got the recorded response
assert len(response.data) == 2
assert response.data[0].embedding == [0.1, 0.2, 0.3]
# Verify we got the recorded response
assert len(response.data) == 2
assert response.data[0].embedding == [0.1, 0.2, 0.3]
# Verify original method was not called
mock_create_patch.assert_not_called()
# Verify original method was not called
client.embeddings._post.assert_not_called()
async def test_completions_recording(self, temp_storage_dir):
real_completions_response = OpenAICompletion(
id="test_completion",
object="text_completion",
created=1234567890,
model="llama3.2:3b",
choices=[
{
"text": "Hello! I'm doing well, thank you for asking.",
"index": 0,
"logprobs": None,
"finish_reason": "stop",
}
],
)
temp_storage_dir = temp_storage_dir / "test_completions_recording"
# baseline - mock works without recording
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.completions._post = AsyncMock(return_value=real_completions_response)
response = await client.completions.create(
model=real_completions_response.model,
prompt="Hello, how are you?",
temperature=0.7,
max_tokens=50,
user=NOT_GIVEN,
)
assert response.choices[0].text == real_completions_response.choices[0].text
client.completions._post.assert_called_once()
# Record
with inference_recording(mode=InferenceMode.RECORD, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.completions._post = AsyncMock(return_value=real_completions_response)
response = await client.completions.create(
model=real_completions_response.model,
prompt="Hello, how are you?",
temperature=0.7,
max_tokens=50,
user=NOT_GIVEN,
)
assert response.choices[0].text == real_completions_response.choices[0].text
client.completions._post.assert_called_once()
# Replay
with inference_recording(mode=InferenceMode.REPLAY, storage_dir=str(temp_storage_dir)):
client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="test")
client.completions._post = AsyncMock(return_value=real_completions_response)
response = await client.completions.create(
model=real_completions_response.model,
prompt="Hello, how are you?",
temperature=0.7,
max_tokens=50,
)
assert response.choices[0].text == real_completions_response.choices[0].text
client.completions._post.assert_not_called()
async def test_live_mode(self, real_openai_chat_response):
"""Test that live mode passes through to original methods."""

View file

@ -6,11 +6,7 @@
import asyncio
import json
import logging # allow-direct-logging
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
@ -18,7 +14,7 @@ from openai.types.chat.chat_completion_chunk import (
ChatCompletionChunk as OpenAIChatCompletionChunk,
)
from openai.types.chat.chat_completion_chunk import (
Choice as OpenAIChoice,
Choice as OpenAIChoiceChunk,
)
from openai.types.chat.chat_completion_chunk import (
ChoiceDelta as OpenAIChoiceDelta,
@ -35,6 +31,9 @@ from llama_stack.apis.inference import (
ChatCompletionRequest,
ChatCompletionResponseEventType,
CompletionMessage,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,
SystemMessage,
ToolChoice,
ToolConfig,
@ -61,41 +60,6 @@ from llama_stack.providers.remote.inference.vllm.vllm import (
# -v -s --tb=short --disable-warnings
class MockInferenceAdapterWithSleep:
def __init__(self, sleep_time: int, response: dict[str, Any]):
self.httpd = None
class DelayedRequestHandler(BaseHTTPRequestHandler):
# ruff: noqa: N802
def do_POST(self):
time.sleep(sleep_time)
response_body = json.dumps(response).encode("utf-8")
self.send_response(code=200)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(response_body))
self.end_headers()
self.wfile.write(response_body)
self.request_handler = DelayedRequestHandler
def __enter__(self):
httpd = HTTPServer(("", 0), self.request_handler)
self.httpd = httpd
host, port = httpd.server_address
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True # stop server if this thread terminates
httpd_thread.start()
config = VLLMInferenceAdapterConfig(url=f"http://{host}:{port}")
inference_adapter = VLLMInferenceAdapter(config)
return inference_adapter
def __exit__(self, _exc_type, _exc_value, _traceback):
if self.httpd:
self.httpd.shutdown()
self.httpd.server_close()
@pytest.fixture(scope="module")
def mock_openai_models_list():
with patch("openai.resources.models.AsyncModels.list", new_callable=AsyncMock) as mock_list:
@ -201,7 +165,7 @@ async def test_tool_call_delta_empty_tool_call_buf():
async def mock_stream():
delta = OpenAIChoiceDelta(content="", tool_calls=None)
choices = [OpenAIChoice(delta=delta, finish_reason="stop", index=0)]
choices = [OpenAIChoiceChunk(delta=delta, finish_reason="stop", index=0)]
mock_chunk = OpenAIChatCompletionChunk(
id="chunk-1",
created=1,
@ -227,7 +191,7 @@ async def test_tool_call_delta_streaming_arguments_dict():
model="foo",
object="chat.completion.chunk",
choices=[
OpenAIChoice(
OpenAIChoiceChunk(
delta=OpenAIChoiceDelta(
content="",
tool_calls=[
@ -252,7 +216,7 @@ async def test_tool_call_delta_streaming_arguments_dict():
model="foo",
object="chat.completion.chunk",
choices=[
OpenAIChoice(
OpenAIChoiceChunk(
delta=OpenAIChoiceDelta(
content="",
tool_calls=[
@ -277,7 +241,9 @@ async def test_tool_call_delta_streaming_arguments_dict():
model="foo",
object="chat.completion.chunk",
choices=[
OpenAIChoice(delta=OpenAIChoiceDelta(content="", tool_calls=None), finish_reason="tool_calls", index=0)
OpenAIChoiceChunk(
delta=OpenAIChoiceDelta(content="", tool_calls=None), finish_reason="tool_calls", index=0
)
],
)
for chunk in [mock_chunk_1, mock_chunk_2, mock_chunk_3]:
@ -301,7 +267,7 @@ async def test_multiple_tool_calls():
model="foo",
object="chat.completion.chunk",
choices=[
OpenAIChoice(
OpenAIChoiceChunk(
delta=OpenAIChoiceDelta(
content="",
tool_calls=[
@ -326,7 +292,7 @@ async def test_multiple_tool_calls():
model="foo",
object="chat.completion.chunk",
choices=[
OpenAIChoice(
OpenAIChoiceChunk(
delta=OpenAIChoiceDelta(
content="",
tool_calls=[
@ -351,7 +317,9 @@ async def test_multiple_tool_calls():
model="foo",
object="chat.completion.chunk",
choices=[
OpenAIChoice(delta=OpenAIChoiceDelta(content="", tool_calls=None), finish_reason="tool_calls", index=0)
OpenAIChoiceChunk(
delta=OpenAIChoiceDelta(content="", tool_calls=None), finish_reason="tool_calls", index=0
)
],
)
for chunk in [mock_chunk_1, mock_chunk_2, mock_chunk_3]:
@ -395,59 +363,6 @@ async def test_process_vllm_chat_completion_stream_response_no_choices():
assert chunks[0].event.event_type.value == "start"
@pytest.mark.allow_network
def test_chat_completion_doesnt_block_event_loop(caplog):
loop = asyncio.new_event_loop()
loop.set_debug(True)
caplog.set_level(logging.WARNING)
# Log when event loop is blocked for more than 500ms
loop.slow_callback_duration = 0.5
# Sleep for 500ms in our delayed http response
sleep_time = 0.5
mock_model = Model(identifier="mock-model", provider_resource_id="mock-model", provider_id="vllm-inference")
mock_response = {
"id": "chatcmpl-abc123",
"object": "chat.completion",
"created": 1,
"modle": "mock-model",
"choices": [
{
"message": {"content": ""},
"logprobs": None,
"finish_reason": "stop",
"index": 0,
}
],
}
async def do_chat_completion():
await inference_adapter.chat_completion(
"mock-model",
[],
stream=False,
tools=None,
tool_config=ToolConfig(tool_choice=ToolChoice.auto),
)
with MockInferenceAdapterWithSleep(sleep_time, mock_response) as inference_adapter:
inference_adapter.model_store = AsyncMock()
inference_adapter.model_store.get_model.return_value = mock_model
loop.run_until_complete(inference_adapter.initialize())
# Clear the logs so far and run the actual chat completion we care about
caplog.clear()
loop.run_until_complete(do_chat_completion())
# Ensure we don't have any asyncio warnings in the captured log
# records from our chat completion call. A message gets logged
# here any time we exceed the slow_callback_duration configured
# above.
asyncio_warnings = [record.message for record in caplog.records if record.name == "asyncio"]
assert not asyncio_warnings
async def test_get_params_empty_tools(vllm_inference_adapter):
request = ChatCompletionRequest(
tools=[],
@ -696,3 +611,48 @@ async def test_health_status_failure(vllm_inference_adapter):
assert "Health check failed: Connection failed" in health_response["message"]
mock_models.list.assert_called_once()
async def test_openai_chat_completion_is_async(vllm_inference_adapter):
"""
Verify that openai_chat_completion is async and doesn't block the event loop.
To do this we mock the underlying inference with a sleep, start multiple
inference calls in parallel, and ensure the total time taken is less
than the sum of the individual sleep times.
"""
sleep_time = 0.5
async def mock_create(*args, **kwargs):
await asyncio.sleep(sleep_time)
return OpenAIChatCompletion(
id="chatcmpl-abc123",
created=1,
model="mock-model",
choices=[
OpenAIChoice(
message=OpenAIAssistantMessageParam(
content="nothing interesting",
),
finish_reason="stop",
index=0,
)
],
)
async def do_inference():
await vllm_inference_adapter.openai_chat_completion(
"mock-model", messages=["one fish", "two fish"], stream=False
)
with patch.object(VLLMInferenceAdapter, "client", new_callable=PropertyMock) as mock_create_client:
mock_client = MagicMock()
mock_client.chat.completions.create = AsyncMock(side_effect=mock_create)
mock_create_client.return_value = mock_client
start_time = time.time()
await asyncio.gather(do_inference(), do_inference(), do_inference(), do_inference())
total_time = time.time() - start_time
assert mock_create_client.call_count == 4 # no cheating
assert total_time < (sleep_time * 2), f"Total time taken: {total_time}s exceeded expected max"
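
For the timing bound: four sequential calls at 0.5 s each would take roughly 2 s, while truly concurrent calls finish in about 0.5 s plus scheduling overhead, so the < 1 s assertion only passes when the adapter yields to the event loop during inference.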

View file

@ -52,14 +52,19 @@ class TestNVIDIAEvalImpl(unittest.TestCase):
self.evaluator_post_patcher = patch(
"llama_stack.providers.remote.eval.nvidia.eval.NVIDIAEvalImpl._evaluator_post"
)
self.evaluator_delete_patcher = patch(
"llama_stack.providers.remote.eval.nvidia.eval.NVIDIAEvalImpl._evaluator_delete"
)
self.mock_evaluator_get = self.evaluator_get_patcher.start()
self.mock_evaluator_post = self.evaluator_post_patcher.start()
self.mock_evaluator_delete = self.evaluator_delete_patcher.start()
def tearDown(self):
"""Clean up after each test."""
self.evaluator_get_patcher.stop()
self.evaluator_post_patcher.stop()
self.evaluator_delete_patcher.stop()
def _assert_request_body(self, expected_json):
"""Helper method to verify request body in Evaluator POST request is correct"""
@ -115,6 +120,13 @@ class TestNVIDIAEvalImpl(unittest.TestCase):
self.mock_evaluator_post.assert_called_once()
self._assert_request_body({"namespace": benchmark.provider_id, "name": benchmark.identifier, **eval_config})
def test_unregister_benchmark(self):
# Unregister the benchmark
self.run_async(self.eval_impl.unregister_benchmark(benchmark_id=MOCK_BENCHMARK_ID))
# Verify the Evaluator API was called correctly
self.mock_evaluator_delete.assert_called_once_with(f"/v1/evaluation/configs/nvidia/{MOCK_BENCHMARK_ID}")
def test_run_eval(self):
benchmark_config = BenchmarkConfig(
eval_candidate=ModelCandidate(

View file

@ -26,9 +26,9 @@ def test_generate_chunk_id():
chunk_ids = sorted([chunk.chunk_id for chunk in chunks])
assert chunk_ids == [
"177a1368-f6a8-0c50-6e92-18677f2c3de3",
"bc744db3-1b25-0a9c-cdff-b6ba3df73c36",
"f68df25d-d9aa-ab4d-5684-64a233add20d",
"31d1f9a3-c8d2-66e7-3c37-af2acd329778",
"d07dade7-29c0-cda7-df29-0249a1dcbc3e",
"d14f75a1-5855-7f72-2c78-d9fc4275a346",
]
@ -36,14 +36,14 @@ def test_generate_chunk_id_with_window():
chunk = Chunk(content="test", metadata={"document_id": "doc-1"})
chunk_id1 = generate_chunk_id("doc-1", chunk, chunk_window="0-1")
chunk_id2 = generate_chunk_id("doc-1", chunk, chunk_window="1-2")
assert chunk_id1 == "149018fe-d0eb-0f8d-5f7f-726bdd2aeedb"
assert chunk_id2 == "4562c1ee-9971-1f3b-51a6-7d05e5211154"
assert chunk_id1 == "8630321a-d9cb-2bb6-cd28-ebf68dafd866"
assert chunk_id2 == "13a1c09a-cbda-b61a-2d1a-7baa90888685"
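
These identifiers are deterministic, so the expected values change whenever the hash input changes. A hypothetical sketch of deriving such content-addressed IDs (the real generate_chunk_id may hash different fields; this only illustrates the UUID-from-digest idea):

    import hashlib
    import uuid

    def chunk_id_sketch(document_id: str, content: str, chunk_window: str | None = None) -> str:
        # Fold the identifying fields into one key and shape its md5 digest as a UUID.
        key = f"{document_id}:{content}"
        if chunk_window is not None:
            key = f"{key}:{chunk_window}"
        return str(uuid.UUID(hashlib.md5(key.encode()).hexdigest()))
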
def test_chunk_id():
# Test with existing chunk ID
chunk_with_id = Chunk(content="test", metadata={"document_id": "existing-id"})
assert chunk_with_id.chunk_id == "84ededcc-b80b-a83e-1a20-ca6515a11350"
assert chunk_with_id.chunk_id == "11704f92-42b6-61df-bf85-6473e7708fbd"
# Test with document ID in metadata
chunk_with_doc_id = Chunk(content="test", metadata={"document_id": "doc-1"})